diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE.md
similarity index 100%
rename from .github/ISSUE_TEMPLATE/bug_report.md
rename to .github/ISSUE_TEMPLATE.md
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 2a5e66a6e..ee7a23810 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -33,8 +33,9 @@ jobs:
         run: echo BUILD_TIME=$(date -u +%Y-%m-%d-%H-%M) >> ${GITHUB_ENV}
 
       - name: Go Release Binaries
-        uses: wangyoucao577/go-release-action@v1.14
+        uses: wangyoucao577/go-release-action@v1.17
         with:
+          goversion: 1.16
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
@@ -49,8 +50,9 @@ jobs:
           asset_name: "weed-large-disk-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
 
       - name: Go Release Binaries
-        uses: wangyoucao577/go-release-action@v1.14
+        uses: wangyoucao577/go-release-action@v1.17
         with:
+          goversion: 1.16
           github_token: ${{ secrets.GITHUB_TOKEN }}
           goos: ${{ matrix.goos }}
           goarch: ${{ matrix.goarch }}
@@ -60,5 +62,5 @@ jobs:
           ldflags: -extldflags -static -X github.com/chrislusf/seaweedfs/weed/util.COMMIT=${{github.sha}}
           # Where to run `go build .`
           project_path: weed
-          binary_name: weed-
+          binary_name: weed
           asset_name: "weed-${{ env.BUILD_TIME }}-${{ matrix.goos }}-${{ matrix.goarch }}"
diff --git a/.travis.yml b/.travis.yml
index a5ebf415f..7934e1ead 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,7 +1,6 @@
 sudo: false
 language: go
 go:
-  - 1.15.x
   - 1.16.x
 
 before_install:
diff --git a/README.md b/README.md
index e463092bc..a28b13b98 100644
--- a/README.md
+++ b/README.md
@@ -68,12 +68,16 @@ Table of Contents
 * [License](#license)
 
 
-## Quick Start ##
+## Quick Start with single binary ##
 * Download the latest binary from https://github.com/chrislusf/seaweedfs/releases and unzip a single binary file `weed` or `weed.exe`
 * Run `weed server -dir=/some/data/dir -s3` to start one master, one volume server, one filer, and one S3 gateway.
 
 Also, to increase capacity, just add more volume servers by running `weed volume -dir="/some/data/dir2" -mserver="<master_host>:9333" -port=8081` locally, or on a different machine, or on thousands of machines. That is it!
 
+## Quick Start for S3 API on Docker ##
+
+`docker run -p 8333:8333 chrislusf/seaweedfs server -s3`
+
 ## Introduction ##
 
 SeaweedFS is a simple and highly scalable distributed file system. There are two objectives:
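To exercise the S3 gateway from the Docker quick start added above, a minimal client sketch with the AWS SDK for Go v1 could look like the following. The endpoint matches the published port 8333; the bucket name and the static credentials are placeholders (SeaweedFS generally accepts any credentials while no S3 identities are configured), so treat this as an illustration rather than part of the patch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Point the SDK at the local gateway started by `docker run -p 8333:8333 ... server -s3`.
	sess, err := session.NewSession(&aws.Config{
		Endpoint:         aws.String("http://localhost:8333"),
		Region:           aws.String("us-east-1"),
		Credentials:      credentials.NewStaticCredentials("any", "any", ""),
		S3ForcePathStyle: aws.Bool(true), // address buckets by path, not by virtual host
	})
	if err != nil {
		log.Fatal(err)
	}

	svc := s3.New(sess)
	if _, err := svc.CreateBucket(&s3.CreateBucketInput{Bucket: aws.String("test-bucket")}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("bucket created")
}
```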
diff --git a/docker/Dockerfile b/docker/Dockerfile
index 6309e7e2e..2165466ca 100644
--- a/docker/Dockerfile
+++ b/docker/Dockerfile
@@ -1,6 +1,7 @@
 FROM alpine
 
-ARG RELEASE=latest # 'latest' or 'dev'
+# 'latest' or 'dev'
+ARG RELEASE=latest
 
 RUN \
     ARCH=$(if [ $(uname -m) == "x86_64" ] && [ $(getconf LONG_BIT) == "64" ]; then echo "amd64"; \
diff --git a/docker/Makefile b/docker/Makefile
index a933956b7..58d494b95 100644
--- a/docker/Makefile
+++ b/docker/Makefile
@@ -5,7 +5,9 @@ all: gen
 gen: dev
 
 binary:
-	cd ../weed; CGO_ENABLED=0 GOOS=linux go build -ldflags "-extldflags -static"; mv weed ../docker/
+	export SWCOMMIT=$(shell git rev-parse --short HEAD) && \
+	export SWLDFLAGS="-X github.com/chrislusf/seaweedfs/weed/util.COMMIT=$${SWCOMMIT}" && \
+	cd ../weed; CGO_ENABLED=0 GOOS=linux go build -ldflags "-extldflags -static $${SWLDFLAGS}"; mv weed ../docker/
 
 build: binary
 	docker build --no-cache -t chrislusf/seaweedfs:local -f Dockerfile.local .
diff --git a/docker/compose/local-dev-compose.yml b/docker/compose/local-dev-compose.yml
index 05103a7fc..01d0594a6 100644
--- a/docker/compose/local-dev-compose.yml
+++ b/docker/compose/local-dev-compose.yml
@@ -26,9 +26,10 @@ services:
   filer:
     image: chrislusf/seaweedfs:local
     ports:
+      - 8111:8111
       - 8888:8888
       - 18888:18888
-    command: '-v=1 filer -master="master:9333"'
+    command: '-v=1 filer -master="master:9333" -iam'
     depends_on:
       - master
       - volume
diff --git a/docker/compose/local-mount-compose.yml b/docker/compose/local-mount-compose.yml
index b1c579cdf..8c4329054 100644
--- a/docker/compose/local-mount-compose.yml
+++ b/docker/compose/local-mount-compose.yml
@@ -38,7 +38,7 @@ services:
   mount_2:
     image: chrislusf/seaweedfs:local
     privileged: true
-    entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4  mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1 -volumeServerAcess=publicUrl"'
+    entrypoint: '/bin/sh -c "mkdir -p t2 && mkdir -p cache/t2 && weed -v=4  mount -filer=filer:8888 -cacheDir=./cache/t2 -dir=./t2 -filer.path=/c1"'
     depends_on:
       - master
       - volume
diff --git a/go.mod b/go.mod
index e792af37b..f54c82994 100644
--- a/go.mod
+++ b/go.mod
@@ -15,7 +15,7 @@ require (
 	github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72
 	github.com/bwmarrin/snowflake v0.3.0
 	github.com/cespare/xxhash v1.1.0
-	github.com/chrislusf/raft v1.0.4
+	github.com/chrislusf/raft v1.0.6
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/dgrijalva/jwt-go v3.2.0+incompatible
 	github.com/disintegration/imaging v1.6.2
@@ -39,9 +39,10 @@ require (
 	github.com/google/uuid v1.1.1
 	github.com/gorilla/mux v1.7.4
 	github.com/gorilla/websocket v1.4.1 // indirect
-	github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4
+	github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 // indirect
 	github.com/grpc-ecosystem/grpc-gateway v1.11.0 // indirect
 	github.com/jcmturner/gofork v1.0.0 // indirect
+	github.com/jinzhu/copier v0.2.8
 	github.com/json-iterator/go v1.1.10
 	github.com/karlseguin/ccache v2.0.3+incompatible // indirect
 	github.com/karlseguin/ccache/v2 v2.0.7
@@ -61,7 +62,7 @@ require (
 	github.com/prometheus/client_golang v1.3.0
 	github.com/rakyll/statik v0.1.7
 	github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 // indirect
-	github.com/seaweedfs/fuse v1.1.3
+	github.com/seaweedfs/fuse v1.1.6
 	github.com/seaweedfs/goexif v1.0.2
 	github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
 	github.com/spaolacci/murmur3 v1.1.0 // indirect
@@ -89,7 +90,7 @@ require (
 	gocloud.dev/pubsub/rabbitpubsub v0.20.0
 	golang.org/x/image v0.0.0-20200119044424-58c23975cae1 // indirect
 	golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb
-	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
+	golang.org/x/sync v0.0.0-20200930132711-30421366ff76 // indirect
 	golang.org/x/sys v0.0.0-20201022201747-fb209a7c41cd
 	golang.org/x/tools v0.0.0-20200608174601-1b747fd94509
 	google.golang.org/api v0.26.0
diff --git a/go.sum b/go.sum
index 31f5520a8..f2cc6d26d 100644
--- a/go.sum
+++ b/go.sum
@@ -155,6 +155,10 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chrislusf/raft v1.0.4 h1:THhbsVik2hxdE0/VXX834f64Wn9RzgVPp+E+XCWZdKM=
 github.com/chrislusf/raft v1.0.4/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
+github.com/chrislusf/raft v1.0.5 h1:g8GxKCSStfm0/bGBDpNEbmEXL6MJkpXX+NI0ksbX5D4=
+github.com/chrislusf/raft v1.0.5/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
+github.com/chrislusf/raft v1.0.6 h1:wunb85WWhMKhNRn7EmdIw35D4Lmew0ZJv8oYDizR/+Y=
+github.com/chrislusf/raft v1.0.6/go.mod h1:Ep5DP+mJSosjfKiix1uU7Lc2Df/SX4oGJEpZlXH5l68=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -435,6 +439,8 @@ github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjL
 github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
 github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
 github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
+github.com/jinzhu/copier v0.2.8 h1:N8MbL5niMwE3P4dOwurJixz5rMkKfujmMRFmAanSzWE=
+github.com/jinzhu/copier v0.2.8/go.mod h1:24xnZezI2Yqac9J61UC6/dG/k76ttpq0DdJI3QmUvro=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -687,6 +693,12 @@ github.com/seaweedfs/fuse v1.1.1 h1:WD51YFJcBViOx8I89jeqPD+vAKl4EowzBy9GUw0plb0=
 github.com/seaweedfs/fuse v1.1.1/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
 github.com/seaweedfs/fuse v1.1.3 h1:0DddotXwSRGbYG2kynoJyr8GHCy30Z2SpdhP3vdyijY=
 github.com/seaweedfs/fuse v1.1.3/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
+github.com/seaweedfs/fuse v1.1.4 h1:YYqkK86agMhXRSwR+wFbRI8ikMgk3kL6PNTna1MAHyQ=
+github.com/seaweedfs/fuse v1.1.4/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
+github.com/seaweedfs/fuse v1.1.5 h1:wyuRh/mDvrvt8ZLDS7YdPSe6nczniSx4sQFs/Jonveo=
+github.com/seaweedfs/fuse v1.1.5/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
+github.com/seaweedfs/fuse v1.1.6 h1:kvCqaIsCEaYOBw5r8kJPUs9GcbwlIKcScnkPLT7HLuQ=
+github.com/seaweedfs/fuse v1.1.6/go.mod h1:+PP6WlkrRUG6KPE+Th2EX5To/PjHaFsvqg/UgQ39aj8=
 github.com/seaweedfs/goexif v1.0.2 h1:p+rTXYdQ2mgxd+1JaTrQ9N8DvYuw9UH9xgYmJ+Bb29E=
 github.com/seaweedfs/goexif v1.0.2/go.mod h1:MrKs5LK0HXdffrdCZrW3OIMegL2xXpC6ThLyXMyjdrk=
 github.com/secsy/goftp v0.0.0-20190720192957-f31499d7c79a h1:C6IhVTxNkhlb0tlCB6JfHOUv1f0xHPK7V8X4HlJZEJw=
@@ -961,8 +973,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20200930132711-30421366ff76 h1:JnxiSYT3Nm0BT2a8CyvYyM6cnrWpidecD1UuSYbhKm0=
 golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/k8s/seaweedfs/Chart.yaml b/k8s/seaweedfs/Chart.yaml
index d8d069032..c4f6bcc3b 100644
--- a/k8s/seaweedfs/Chart.yaml
+++ b/k8s/seaweedfs/Chart.yaml
@@ -1,5 +1,5 @@
 apiVersion: v1
 description: SeaweedFS
 name: seaweedfs
-appVersion: "2.38"
-version: 2.38
+appVersion: "2.43"
+version: 2.43
diff --git a/k8s/seaweedfs/templates/volume-service.yaml b/k8s/seaweedfs/templates/volume-service.yaml
index a2b0b540e..0a9173fde 100644
--- a/k8s/seaweedfs/templates/volume-service.yaml
+++ b/k8s/seaweedfs/templates/volume-service.yaml
@@ -23,6 +23,6 @@ spec:
     targetPort: {{ .Values.volume.metricsPort }}
     protocol: TCP
 {{- end }}
-selector:
+  selector:
     app: {{ template "seaweedfs.name" . }}
     component: volume
\ No newline at end of file
diff --git a/k8s/seaweedfs/values.yaml b/k8s/seaweedfs/values.yaml
index 19c0c78a1..c75bb869c 100644
--- a/k8s/seaweedfs/values.yaml
+++ b/k8s/seaweedfs/values.yaml
@@ -4,7 +4,7 @@ global:
   registry: ""
   repository: ""
   imageName: chrislusf/seaweedfs
-  # imageTag: "2.38" - started using {.Chart.appVersion}
+  # imageTag: "2.43" - started using {.Chart.appVersion}
   imagePullPolicy: IfNotPresent
   imagePullSecrets: imagepullsecret
   restartPolicy: Always
diff --git a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java
index 257a9873d..0a8356258 100644
--- a/other/java/client/src/main/java/seaweedfs/client/FilerClient.java
+++ b/other/java/client/src/main/java/seaweedfs/client/FilerClient.java
@@ -126,6 +126,18 @@ public class FilerClient extends FilerGrpcClient {
 
     }
 
+    public boolean exists(String path){
+        File pathFile = new File(path);
+        String parent = pathFile.getParent();
+        String entryName = pathFile.getName();
+        if(parent == null) {
+            parent = path;
+            entryName = "";
+        }
+        return lookupEntry(parent, entryName) != null;
+
+    }
+
     public boolean rm(String path, boolean isRecursive, boolean ignoreRecusiveError) {
 
         File pathFile = new File(path);
@@ -142,10 +154,12 @@ public class FilerClient extends FilerGrpcClient {
 
     public boolean touch(String path, int mode) {
         String currentUser = System.getProperty("user.name");
-        return touch(path, mode, 0, 0, currentUser, new String[]{});
+
+        long now = System.currentTimeMillis() / 1000L;
+        return touch(path, now, mode, 0, 0, currentUser, new String[]{});
     }
 
-    public boolean touch(String path, int mode, int uid, int gid, String userName, String[] groupNames) {
+    public boolean touch(String path, long modifiedTimeSecond, int mode, int uid, int gid, String userName, String[] groupNames) {
 
         File pathFile = new File(path);
         String parent = pathFile.getParent().replace('\\','/');
@@ -155,17 +169,25 @@ public class FilerClient extends FilerGrpcClient {
         if (entry == null) {
             return createEntry(
                     parent,
-                    newFileEntry(name, mode, uid, gid, userName, groupNames).build()
+                    newFileEntry(name, modifiedTimeSecond, mode, uid, gid, userName, groupNames).build()
             );
         }
-        long now = System.currentTimeMillis() / 1000L;
-        FilerProto.FuseAttributes.Builder attr = entry.getAttributes().toBuilder()
-                .setMtime(now)
-                .setUid(uid)
-                .setGid(gid)
-                .setUserName(userName)
-                .clearGroupName()
-                .addAllGroupName(Arrays.asList(groupNames));
+        FilerProto.FuseAttributes.Builder attr = entry.getAttributes().toBuilder();
+        if (modifiedTimeSecond>0) {
+            attr.setMtime(modifiedTimeSecond);
+        }
+        if (uid>0) {
+            attr.setUid(uid);
+        }
+        if (gid>0) {
+            attr.setGid(gid);
+        }
+        if (userName!=null) {
+            attr.setUserName(userName);
+        }
+        if (groupNames!=null) {
+            attr.clearGroupName().addAllGroupName(Arrays.asList(groupNames));
+        }
         return updateEntry(parent, entry.toBuilder().setAttributes(attr).build());
     }
 
@@ -188,17 +210,15 @@ public class FilerClient extends FilerGrpcClient {
                         .addAllGroupName(Arrays.asList(groupNames)));
     }
 
-    public FilerProto.Entry.Builder newFileEntry(String name, int mode,
+    public FilerProto.Entry.Builder newFileEntry(String name, long modifiedTimeSecond, int mode,
                                                  int uid, int gid, String userName, String[] groupNames) {
 
-        long now = System.currentTimeMillis() / 1000L;
-
         return FilerProto.Entry.newBuilder()
                 .setName(name)
                 .setIsDirectory(false)
                 .setAttributes(FilerProto.FuseAttributes.newBuilder()
-                        .setMtime(now)
-                        .setCrtime(now)
+                        .setMtime(modifiedTimeSecond)
+                        .setCrtime(modifiedTimeSecond)
                         .setUid(uid)
                         .setGid(gid)
                         .setFileMode(mode)
diff --git a/other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java b/other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java
index 4e40ce1b6..6097b8d56 100644
--- a/other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java
+++ b/other/java/client/src/main/java/seaweedfs/client/SeaweedInputStream.java
@@ -6,6 +6,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.EOFException;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.nio.ByteBuffer;
@@ -34,6 +35,10 @@ public class SeaweedInputStream extends InputStream {
         this.entry = filerClient.lookupEntry(
                 SeaweedOutputStream.getParentDirectory(fullpath),
                 SeaweedOutputStream.getFileName(fullpath));
+        if(entry == null){
+            throw new FileNotFoundException();
+        }
+
         this.contentLength = SeaweedRead.fileSize(entry);
 
         this.visibleIntervalList = SeaweedRead.nonOverlappingVisibleIntervals(filerClient, entry.getChunksList());
diff --git a/other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java b/other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java
index eaf17e5c6..f9a2c3f76 100644
--- a/other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java
+++ b/other/java/client/src/test/java/seaweedfs/client/SeaweedFilerTest.java
@@ -16,8 +16,17 @@ public class SeaweedFilerTest {
         filerClient.mkdirs("/new_folder", 0755);
         filerClient.touch("/new_folder/new_empty_file", 0755);
         filerClient.touch("/new_folder/new_empty_file2", 0755);
+        if(!filerClient.exists("/new_folder/new_empty_file")){
+            System.out.println("/new_folder/new_empty_file should exist");
+        }
+
         filerClient.rm("/new_folder/new_empty_file", false, true);
         filerClient.rm("/new_folder", true, true);
-
+        if(filerClient.exists("/new_folder/new_empty_file")){
+            System.out.println("/new_folder/new_empty_file should not exist");
+        }
+        if(!filerClient.exists("/")){
+            System.out.println("/ should exist");
+        }
     }
 }
diff --git a/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile2.java b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile2.java
new file mode 100644
index 000000000..61d8c290f
--- /dev/null
+++ b/other/java/examples/src/main/java/com/seaweedfs/examples/ExampleWriteFile2.java
@@ -0,0 +1,22 @@
+package com.seaweedfs.examples;
+
+import com.google.common.io.Files;
+import seaweedfs.client.FilerClient;
+import seaweedfs.client.SeaweedOutputStream;
+
+import java.io.File;
+import java.io.IOException;
+
+public class ExampleWriteFile2 {
+
+    public static void main(String[] args) throws IOException {
+
+        FilerClient filerClient = new FilerClient("localhost", 18888);
+
+        SeaweedOutputStream seaweedOutputStream = new SeaweedOutputStream(filerClient, "/test/1");
+        Files.copy(new File("/etc/resolv.conf"), seaweedOutputStream);
+        seaweedOutputStream.close();
+
+    }
+
+}
diff --git a/unmaintained/repeated_vacuum/repeated_vacuum.go b/unmaintained/repeated_vacuum/repeated_vacuum.go
index bff5becc1..d85e45af0 100644
--- a/unmaintained/repeated_vacuum/repeated_vacuum.go
+++ b/unmaintained/repeated_vacuum/repeated_vacuum.go
@@ -52,7 +52,7 @@ func main() {
 }
 
 func genFile(grpcDialOption grpc.DialOption, i int) (*operation.AssignResult, string) {
-	assignResult, err := operation.Assign(*master, grpcDialOption, &operation.VolumeAssignRequest{
+	assignResult, err := operation.Assign(func() string { return *master }, grpcDialOption, &operation.VolumeAssignRequest{
 		Count:       1,
 		Replication: *replication,
 	})
diff --git a/weed/Makefile b/weed/Makefile
index 8f1257d09..edc0bf544 100644
--- a/weed/Makefile
+++ b/weed/Makefile
@@ -16,7 +16,7 @@ debug_shell:
 
 debug_mount:
 	go build -gcflags="all=-N -l"
-	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets
+	dlv --listen=:2345 --headless=true --api-version=2 --accept-multiclient exec weed -- -v=4 mount -dir=~/tmp/mm -cacheCapacityMB=0 -filer.path=/buckets
 
 debug_server:
 	go build -gcflags="all=-N -l"
diff --git a/weed/command/command.go b/weed/command/command.go
index a9063eaa0..b6efcead2 100644
--- a/weed/command/command.go
+++ b/weed/command/command.go
@@ -22,9 +22,11 @@ var Commands = []*Command{
 	cmdFilerReplicate,
 	cmdFilerSynchronize,
 	cmdFix,
+	cmdGateway,
 	cmdMaster,
 	cmdMount,
 	cmdS3,
+	cmdIam,
 	cmdMsgBroker,
 	cmdScaffold,
 	cmdServer,
diff --git a/weed/command/export.go b/weed/command/export.go
index f100f3af5..1c32e1050 100644
--- a/weed/command/export.go
+++ b/weed/command/export.go
@@ -215,7 +215,7 @@ func runExport(cmd *Command, args []string) bool {
 
 	err = storage.ScanVolumeFile(util.ResolvePath(*export.dir), *export.collection, vid, storage.NeedleMapInMemory, volumeFileScanner)
 	if err != nil && err != io.EOF {
-		glog.Fatalf("Export Volume File [ERROR] %s\n", err)
+		glog.Errorf("Export Volume File [ERROR] %s\n", err)
 	}
 	return true
 }
diff --git a/weed/command/filer.go b/weed/command/filer.go
index 1b31dbcc7..a723b4d8a 100644
--- a/weed/command/filer.go
+++ b/weed/command/filer.go
@@ -25,6 +25,8 @@ var (
 	filerS3Options     S3Options
 	filerStartWebDav   *bool
 	filerWebDavOptions WebDavOption
+	filerStartIam      *bool
+	filerIamOptions    IamOptions
 )
 
 type FilerOptions struct {
@@ -91,6 +93,10 @@ func init() {
 	filerWebDavOptions.tlsCertificate = cmdFiler.Flag.String("webdav.cert.file", "", "path to the TLS certificate file")
 	filerWebDavOptions.cacheDir = cmdFiler.Flag.String("webdav.cacheDir", os.TempDir(), "local cache directory for file chunks")
 	filerWebDavOptions.cacheSizeMB = cmdFiler.Flag.Int64("webdav.cacheCapacityMB", 1000, "local cache capacity in MB")
+
+	// start iam on filer
+	filerStartIam = cmdFiler.Flag.Bool("iam", false, "whether to start IAM service")
+	filerIamOptions.port = cmdFiler.Flag.Int("iam.port", 8111, "iam server http listen port")
 }
 
 var cmdFiler = &Command{
@@ -108,7 +114,7 @@ var cmdFiler = &Command{
 	GET /path/to/
 
 	The configuration file "filer.toml" is read from ".", "$HOME/.seaweedfs/", "/usr/local/etc/seaweedfs/", or "/etc/seaweedfs/", in that order.
-	If the "filer.toml" is not found, an embedded filer store will be craeted under "-defaultStoreDir".
+	If the "filer.toml" is not found, an embedded filer store will be created under "-defaultStoreDir".
 
 	The example filer.toml configuration file can be generated by "weed scaffold -config=filer"
 
@@ -121,22 +127,33 @@ func runFiler(cmd *Command, args []string) bool {
 
 	go stats_collect.StartMetricsServer(*f.metricsHttpPort)
 
+	filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
+	startDelay := time.Duration(2)
 	if *filerStartS3 {
-		filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
 		filerS3Options.filer = &filerAddress
 		go func() {
-			time.Sleep(2 * time.Second)
+			time.Sleep(startDelay * time.Second)
 			filerS3Options.startS3Server()
 		}()
+		startDelay++
 	}
 
 	if *filerStartWebDav {
-		filerAddress := fmt.Sprintf("%s:%d", *f.ip, *f.port)
 		filerWebDavOptions.filer = &filerAddress
 		go func() {
-			time.Sleep(2 * time.Second)
+			time.Sleep(startDelay * time.Second)
 			filerWebDavOptions.startWebDav()
 		}()
+		startDelay++
+	}
+
+	if *filerStartIam {
+		filerIamOptions.filer = &filerAddress
+		filerIamOptions.masters = f.masters
+		go func() {
+			time.Sleep(startDelay * time.Second)
+			filerIamOptions.startIamServer()
+		}()
 	}
 
 	f.startFiler()
diff --git a/weed/command/filer_cat.go b/weed/command/filer_cat.go
index c4281feba..a46098b04 100644
--- a/weed/command/filer_cat.go
+++ b/weed/command/filer_cat.go
@@ -110,7 +110,7 @@ func runFilerCat(cmd *Command, args []string) bool {
 
 		filerCat.filerClient = client
 
-		return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
+		return filer.StreamContent(&filerCat, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
 
 	})
 
diff --git a/weed/command/filer_copy.go b/weed/command/filer_copy.go
index a36bb8cea..dc729ed33 100644
--- a/weed/command/filer_copy.go
+++ b/weed/command/filer_copy.go
@@ -207,16 +207,6 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
 	}
 
 	mode := fi.Mode()
-	if mode.IsDir() {
-		files, _ := ioutil.ReadDir(fileOrDir)
-		for _, subFileOrDir := range files {
-			if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil {
-				return err
-			}
-		}
-		return nil
-	}
-
 	uid, gid := util.GetFileUidGid(fi)
 
 	fileCopyTaskChan <- FileCopyTask{
@@ -228,6 +218,16 @@ func genFileCopyTask(fileOrDir string, destPath string, fileCopyTaskChan chan Fi
 		gid:                gid,
 	}
 
+	if mode.IsDir() {
+		files, _ := ioutil.ReadDir(fileOrDir)
+		println("checking directory", fileOrDir)
+		for _, subFileOrDir := range files {
+			if err = genFileCopyTask(fileOrDir+"/"+subFileOrDir.Name(), destPath+fi.Name()+"/", fileCopyTaskChan); err != nil {
+				return err
+			}
+		}
+	}
+
 	return nil
 }
 
@@ -293,38 +293,42 @@ func (worker *FileCopyWorker) uploadFileAsOne(task FileCopyTask, f *os.File) err
 
 	// upload the file content
 	fileName := filepath.Base(f.Name())
-	mimeType := detectMimeType(f)
-	data, err := ioutil.ReadAll(f)
-	if err != nil {
-		return err
-	}
+	var mimeType string
 
 	var chunks []*filer_pb.FileChunk
 	var assignResult *filer_pb.AssignVolumeResponse
 	var assignError error
 
-	if task.fileSize > 0 {
+	if task.fileMode&os.ModeDir == 0 && task.fileSize > 0 {
+
+		mimeType = detectMimeType(f)
+		data, err := ioutil.ReadAll(f)
+		if err != nil {
+			return err
+		}
 
 		// assign a volume
-		err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		err = util.Retry("assignVolume", func() error {
+			return pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
 
-			request := &filer_pb.AssignVolumeRequest{
-				Count:       1,
-				Replication: *worker.options.replication,
-				Collection:  *worker.options.collection,
-				TtlSec:      worker.options.ttlSec,
-				DiskType:    *worker.options.diskType,
-				Path:        task.destinationUrlPath,
-			}
+				request := &filer_pb.AssignVolumeRequest{
+					Count:       1,
+					Replication: *worker.options.replication,
+					Collection:  *worker.options.collection,
+					TtlSec:      worker.options.ttlSec,
+					DiskType:    *worker.options.diskType,
+					Path:        task.destinationUrlPath,
+				}
 
-			assignResult, assignError = client.AssignVolume(context.Background(), request)
-			if assignError != nil {
-				return fmt.Errorf("assign volume failure %v: %v", request, assignError)
-			}
-			if assignResult.Error != "" {
-				return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
-			}
-			return nil
+				assignResult, assignError = client.AssignVolume(context.Background(), request)
+				if assignError != nil {
+					return fmt.Errorf("assign volume failure %v: %v", request, assignError)
+				}
+				if assignResult.Error != "" {
+					return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
+				}
+				return nil
+			})
 		})
 		if err != nil {
 			return fmt.Errorf("Failed to assign from %v: %v\n", worker.options.masters, err)
@@ -402,31 +406,30 @@ func (worker *FileCopyWorker) uploadFileInChunks(task FileCopyTask, f *os.File,
 			// assign a volume
 			var assignResult *filer_pb.AssignVolumeResponse
 			var assignError error
-			err := pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
-				request := &filer_pb.AssignVolumeRequest{
-					Count:       1,
-					Replication: *worker.options.replication,
-					Collection:  *worker.options.collection,
-					TtlSec:      worker.options.ttlSec,
-					DiskType:    *worker.options.diskType,
-					Path:        task.destinationUrlPath + fileName,
-				}
+			err := util.Retry("assignVolume", func() error {
+				return pb.WithGrpcFilerClient(worker.filerGrpcAddress, worker.options.grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+					request := &filer_pb.AssignVolumeRequest{
+						Count:       1,
+						Replication: *worker.options.replication,
+						Collection:  *worker.options.collection,
+						TtlSec:      worker.options.ttlSec,
+						DiskType:    *worker.options.diskType,
+						Path:        task.destinationUrlPath + fileName,
+					}
 
-				assignResult, assignError = client.AssignVolume(context.Background(), request)
-				if assignError != nil {
-					return fmt.Errorf("assign volume failure %v: %v", request, assignError)
-				}
-				if assignResult.Error != "" {
-					return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
-				}
-				return nil
+					assignResult, assignError = client.AssignVolume(context.Background(), request)
+					if assignError != nil {
+						return fmt.Errorf("assign volume failure %v: %v", request, assignError)
+					}
+					if assignResult.Error != "" {
+						return fmt.Errorf("assign volume failure %v: %v", request, assignResult.Error)
+					}
+					return nil
+				})
 			})
 			if err != nil {
 				fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
 			}
-			if err != nil {
-				fmt.Printf("Failed to assign from %v: %v\n", worker.options.masters, err)
-			}
 
 			targetUrl := "http://" + assignResult.Url + "/" + assignResult.FileId
 			if collection == "" {
diff --git a/weed/command/gateway.go b/weed/command/gateway.go
new file mode 100644
index 000000000..8a6f852a5
--- /dev/null
+++ b/weed/command/gateway.go
@@ -0,0 +1,93 @@
+package command
+
+import (
+	"net/http"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/server"
+	"github.com/chrislusf/seaweedfs/weed/util"
+)
+
+var (
+	gatewayOptions GatewayOptions
+)
+
+type GatewayOptions struct {
+	masters *string
+	filers  *string
+	bindIp  *string
+	port    *int
+	maxMB   *int
+}
+
+func init() {
+	cmdGateway.Run = runGateway // break init cycle
+	gatewayOptions.masters = cmdGateway.Flag.String("master", "localhost:9333", "comma-separated master servers")
+	gatewayOptions.filers = cmdGateway.Flag.String("filer", "localhost:8888", "comma-separated filer servers")
+	gatewayOptions.bindIp = cmdGateway.Flag.String("ip.bind", "localhost", "ip address to bind to")
+	gatewayOptions.port = cmdGateway.Flag.Int("port", 5647, "gateway http listen port")
+	gatewayOptions.maxMB = cmdGateway.Flag.Int("maxMB", 4, "split files larger than the limit")
+}
+
+var cmdGateway = &Command{
+	UsageLine: "gateway -port=8888 -master=<ip:port>[,<ip:port>]* -filer=<ip:port>[,<ip:port>]*",
+	Short:     "start a gateway server that points to a list of master servers or a list of filers",
+	Long: `start a gateway server which accepts REST operations to write any blobs, files, or topic messages.
+
+	POST /blobs/
+		upload the blob and return a chunk id
+	DELETE /blobs/<chunk_id>
+		delete a chunk id
+
+	/*
+	POST /files/path/to/a/file
+		save /path/to/a/file on filer 
+	DELETE /files/path/to/a/file
+		delete /path/to/a/file on filer 
+
+	POST /topics/topicName
+		save on filer to /topics/topicName/<ds>/ts.json
+	*/
+`,
+}
+
+func runGateway(cmd *Command, args []string) bool {
+
+	util.LoadConfiguration("security", false)
+
+	gatewayOptions.startGateway()
+
+	return true
+}
+
+func (gw *GatewayOptions) startGateway() {
+
+	defaultMux := http.NewServeMux()
+
+	_, gws_err := weed_server.NewGatewayServer(defaultMux, &weed_server.GatewayOption{
+		Masters: strings.Split(*gw.masters, ","),
+		Filers:  strings.Split(*gw.filers, ","),
+		MaxMB:   *gw.maxMB,
+	})
+	if gws_err != nil {
+		glog.Fatalf("Gateway startup error: %v", gws_err)
+	}
+
+	glog.V(0).Infof("Start Seaweed Gateway %s at %s:%d", util.Version(), *gw.bindIp, *gw.port)
+	gatewayListener, e := util.NewListener(
+		*gw.bindIp+":"+strconv.Itoa(*gw.port),
+		time.Duration(10)*time.Second,
+	)
+	if e != nil {
+		glog.Fatalf("Gateway listener error: %v", e)
+	}
+
+	httpS := &http.Server{Handler: defaultMux}
+	if err := httpS.Serve(gatewayListener); err != nil {
+		glog.Fatalf("Gateway Fail to serve: %v", err)
+	}
+
+}
diff --git a/weed/command/iam.go b/weed/command/iam.go
new file mode 100644
index 000000000..17d0832cb
--- /dev/null
+++ b/weed/command/iam.go
@@ -0,0 +1,97 @@
+package command
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/iamapi"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/security"
+	"github.com/chrislusf/seaweedfs/weed/util"
+	"github.com/gorilla/mux"
+	"time"
+)
+
+var (
+	iamStandaloneOptions IamOptions
+)
+
+type IamOptions struct {
+	filer   *string
+	masters *string
+	port    *int
+}
+
+func init() {
+	cmdIam.Run = runIam // break init cycle
+	iamStandaloneOptions.filer = cmdIam.Flag.String("filer", "localhost:8888", "filer server address")
+	iamStandaloneOptions.masters = cmdIam.Flag.String("master", "localhost:9333", "comma-separated master servers")
+	iamStandaloneOptions.port = cmdIam.Flag.Int("port", 8111, "iam server http listen port")
+}
+
+var cmdIam = &Command{
+	UsageLine: "iam [-port=8111] [-filer=<ip:port>] [-master=<ip:port>,<ip:port>]",
+	Short:     "start an IAM API compatible server",
+	Long:      "start an IAM API compatible server.",
+}
+
+func runIam(cmd *Command, args []string) bool {
+	return iamStandaloneOptions.startIamServer()
+}
+
+func (iamopt *IamOptions) startIamServer() bool {
+	filerGrpcAddress, err := pb.ParseServerToGrpcAddress(*iamopt.filer)
+	if err != nil {
+		glog.Fatal(err)
+		return false
+	}
+
+	grpcDialOption := security.LoadClientTLS(util.GetViper(), "grpc.client")
+	for {
+		err = pb.WithGrpcFilerClient(filerGrpcAddress, grpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+			resp, err := client.GetFilerConfiguration(context.Background(), &filer_pb.GetFilerConfigurationRequest{})
+			if err != nil {
+				return fmt.Errorf("get filer %s configuration: %v", filerGrpcAddress, err)
+			}
+			glog.V(0).Infof("IAM read filer configuration: %s", resp)
+			return nil
+		})
+		if err != nil {
+			glog.V(0).Infof("wait to connect to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
+			time.Sleep(time.Second)
+		} else {
+			glog.V(0).Infof("connected to filer %s grpc address %s", *iamopt.filer, filerGrpcAddress)
+			break
+		}
+	}
+
+	router := mux.NewRouter().SkipClean(true)
+	_, iamApiServer_err := iamapi.NewIamApiServer(router, &iamapi.IamServerOption{
+		Filer:            *iamopt.filer,
+		Port:             *iamopt.port,
+		FilerGrpcAddress: filerGrpcAddress,
+		GrpcDialOption:   grpcDialOption,
+	})
+	glog.V(0).Info("NewIamApiServer created")
+	if iamApiServer_err != nil {
+		glog.Fatalf("IAM API Server startup error: %v", iamApiServer_err)
+	}
+
+	httpS := &http.Server{Handler: router}
+
+	listenAddress := fmt.Sprintf(":%d", *iamopt.port)
+	iamApiListener, err := util.NewListener(listenAddress, time.Duration(10)*time.Second)
+	if err != nil {
+		glog.Fatalf("IAM API Server listener on %s error: %v", listenAddress, err)
+	}
+
+	glog.V(0).Infof("Start Seaweed IAM API Server %s at http port %d", util.Version(), *iamopt.port)
+	if err = httpS.Serve(iamApiListener); err != nil {
+		glog.Fatalf("IAM API Server Fail to serve: %v", err)
+	}
+
+	return true
+}
diff --git a/weed/command/mount_std.go b/weed/command/mount_std.go
index 8e5b7a483..2474cf7dd 100644
--- a/weed/command/mount_std.go
+++ b/weed/command/mount_std.go
@@ -149,8 +149,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 		fuse.Subtype("seaweedfs"),
 		// fuse.NoAppleDouble(), // include .DS_Store, otherwise can not delete non-empty folders
 		fuse.NoAppleXattr(),
-		fuse.NoBrowse(),
-		fuse.AutoXattr(),
 		fuse.ExclCreate(),
 		fuse.DaemonTimeout("3600"),
 		fuse.AllowSUID(),
@@ -169,6 +167,9 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 	if *option.nonempty {
 		options = append(options, fuse.AllowNonEmptyMount())
 	}
+	if *option.readOnly {
+		options = append(options, fuse.ReadOnly())
+	}
 
 	// find mount point
 	mountRoot := filerMountRootPath
@@ -193,7 +194,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 		CacheDir:           *option.cacheDir,
 		CacheSizeMB:        *option.cacheSizeMB,
 		DataCenter:         *option.dataCenter,
-		EntryCacheTtl:      3 * time.Second,
 		MountUid:           uid,
 		MountGid:           gid,
 		MountMode:          mountMode,
@@ -203,7 +203,6 @@ func RunMount(option *MountOptions, umask os.FileMode) bool {
 		VolumeServerAccess: *mountOptions.volumeServerAccess,
 		Cipher:             cipher,
 		UidGidMapper:       uidGidMapper,
-		ReadOnly:           *option.readOnly,
 	})
 
 	// mount
diff --git a/weed/command/scaffold.go b/weed/command/scaffold.go
index 4794da18e..88dc94df1 100644
--- a/weed/command/scaffold.go
+++ b/weed/command/scaffold.go
@@ -281,7 +281,7 @@ index.max_result_window = 10000
 #     Make sure they are not the same if using the same store type!
 # 4. Set enabled to true
 #
-# The following is just using cassandra as an example
+# The following is just using redis as an example
 ##########################
 [redis2.tmp]
 enabled = false
diff --git a/weed/command/server.go b/weed/command/server.go
index 6eb3bf97c..d0020d33b 100644
--- a/weed/command/server.go
+++ b/weed/command/server.go
@@ -58,7 +58,8 @@ var (
 	serverDisableHttp         = cmdServer.Flag.Bool("disableHttp", false, "disable http requests, only gRPC operations are allowed.")
 	volumeDataFolders         = cmdServer.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
 	volumeMaxDataVolumeCounts = cmdServer.Flag.String("volume.max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.")
-	volumeMinFreeSpacePercent = cmdServer.Flag.String("volume.minFreeSpacePercent", "1", "minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly.")
+	volumeMinFreeSpacePercent = cmdServer.Flag.String("volume.minFreeSpacePercent", "1", "minimum free disk space (defaults to 1%). Low disk space will mark all volumes as ReadOnly (deprecated, use volume.minFreeSpace instead).")
+	volumeMinFreeSpace        = cmdServer.Flag.String("volume.minFreeSpace", "", "min free disk space (value<=100 as percentage like 1, other as human readable bytes, like 10GiB). Low disk space will mark all volumes as ReadOnly.")
 	serverMetricsHttpPort     = cmdServer.Flag.Int("metricsPort", 0, "Prometheus metrics listen port")
 
 	// pulseSeconds              = cmdServer.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats")
@@ -244,8 +245,8 @@ func runServer(cmd *Command, args []string) bool {
 
 	// start volume server
 	if *isStartingVolumeServer {
-		go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, *volumeMinFreeSpacePercent)
-
+		minFreeSpaces := util.MustParseMinFreeSpace(*volumeMinFreeSpace, *volumeMinFreeSpacePercent)
+		go serverOptions.v.startVolumeServer(*volumeDataFolders, *volumeMaxDataVolumeCounts, *serverWhiteListOption, minFreeSpaces)
 	}
 
 	if *isStartingMasterServer {
@@ -253,6 +254,4 @@ func runServer(cmd *Command, args []string) bool {
 	}
 
 	select {}
-
-	return true
 }
diff --git a/weed/command/volume.go b/weed/command/volume.go
index 9df500178..139a3791e 100644
--- a/weed/command/volume.go
+++ b/weed/command/volume.go
@@ -57,7 +57,6 @@ type VolumeServerOptions struct {
 	compactionMBPerSecond   *int
 	fileSizeLimitMB         *int
 	concurrentUploadLimitMB *int
-	minFreeSpacePercents    []float32
 	pprof                   *bool
 	preStopSeconds          *int
 	metricsHttpPort         *int
@@ -105,7 +104,8 @@ var (
 	volumeFolders         = cmdVolume.Flag.String("dir", os.TempDir(), "directories to store data files. dir[,dir]...")
 	maxVolumeCounts       = cmdVolume.Flag.String("max", "8", "maximum numbers of volumes, count[,count]... If set to zero, the limit will be auto configured.")
 	volumeWhiteListOption = cmdVolume.Flag.String("whiteList", "", "comma separated Ip addresses having write permission. No limit if empty.")
-	minFreeSpacePercent   = cmdVolume.Flag.String("minFreeSpacePercent", "1", "minimum free disk space (default to 1%). Low disk space will mark all volumes as ReadOnly.")
+	minFreeSpacePercent   = cmdVolume.Flag.String("minFreeSpacePercent", "1", "minimum free disk space (defaults to 1%). Low disk space will mark all volumes as ReadOnly (deprecated, use minFreeSpace instead).")
+	minFreeSpace          = cmdVolume.Flag.String("minFreeSpace", "", "min free disk space (value<=100 as percentage like 1, other as human readable bytes, like 10GiB). Low disk space will mark all volumes as ReadOnly.")
 )
 
 func runVolume(cmd *Command, args []string) bool {
@@ -120,12 +120,13 @@ func runVolume(cmd *Command, args []string) bool {
 
 	go stats_collect.StartMetricsServer(*v.metricsHttpPort)
 
-	v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, *minFreeSpacePercent)
+	minFreeSpaces := util.MustParseMinFreeSpace(*minFreeSpace, *minFreeSpacePercent)
+	v.startVolumeServer(*volumeFolders, *maxVolumeCounts, *volumeWhiteListOption, minFreeSpaces)
 
 	return true
 }
 
-func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption, minFreeSpacePercent string) {
+func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, volumeWhiteListOption string, minFreeSpaces []util.MinFreeSpace) {
 
 	// Set multiple folders and each folder's max volume count limit'
 	v.folders = strings.Split(volumeFolders, ",")
@@ -153,22 +154,13 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 		glog.Fatalf("%d directories by -dir, but only %d max is set by -max", len(v.folders), len(v.folderMaxLimits))
 	}
 
-	// set minFreeSpacePercent
-	minFreeSpacePercentStrings := strings.Split(minFreeSpacePercent, ",")
-	for _, freeString := range minFreeSpacePercentStrings {
-		if value, e := strconv.ParseFloat(freeString, 32); e == nil {
-			v.minFreeSpacePercents = append(v.minFreeSpacePercents, float32(value))
-		} else {
-			glog.Fatalf("The value specified in -minFreeSpacePercent not a valid value %s", freeString)
-		}
-	}
-	if len(v.minFreeSpacePercents) == 1 && len(v.folders) > 1 {
+	if len(minFreeSpaces) == 1 && len(v.folders) > 1 {
 		for i := 0; i < len(v.folders)-1; i++ {
-			v.minFreeSpacePercents = append(v.minFreeSpacePercents, v.minFreeSpacePercents[0])
+			minFreeSpaces = append(minFreeSpaces, minFreeSpaces[0])
 		}
 	}
-	if len(v.folders) != len(v.minFreeSpacePercents) {
-		glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(v.minFreeSpacePercents))
+	if len(v.folders) != len(minFreeSpaces) {
+		glog.Fatalf("%d directories by -dir, but only %d minFreeSpacePercent is set by -minFreeSpacePercent", len(v.folders), len(minFreeSpaces))
 	}
 
 	// set disk types
@@ -231,7 +223,7 @@ func (v VolumeServerOptions) startVolumeServer(volumeFolders, maxVolumeCounts, v
 
 	volumeServer := weed_server.NewVolumeServer(volumeMux, publicVolumeMux,
 		*v.ip, *v.port, *v.publicUrl,
-		v.folders, v.folderMaxLimits, v.minFreeSpacePercents, diskTypes,
+		v.folders, v.folderMaxLimits, minFreeSpaces, diskTypes,
 		*v.idxFolder,
 		volumeNeedleMapKind,
 		strings.Split(masters, ","), 5, *v.dataCenter, *v.rack,
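`util.MustParseMinFreeSpace` itself is not part of this patch, so as a rough standalone sketch of the rule stated in the new `-minFreeSpace` flag help (a value of at most 100 is read as a percentage, anything else as an absolute size such as `10GiB`), a hypothetical parser could look like this; it is an assumption for illustration, not the SeaweedFS implementation.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// minFreeSpace is a stand-in for util.MinFreeSpace: either a percentage or absolute bytes.
type minFreeSpace struct {
	Percent float64 // used when Bytes == 0
	Bytes   uint64
}

func parseMinFreeSpace(s string) (minFreeSpace, error) {
	if v, err := strconv.ParseFloat(s, 64); err == nil {
		if v <= 100 {
			return minFreeSpace{Percent: v}, nil // "1" -> one percent
		}
		return minFreeSpace{Bytes: uint64(v)}, nil // bare number above 100 -> bytes
	}
	units := []struct {
		suffix string
		factor uint64
	}{{"TiB", 1 << 40}, {"GiB", 1 << 30}, {"MiB", 1 << 20}, {"KiB", 1 << 10}}
	for _, u := range units {
		if strings.HasSuffix(s, u.suffix) {
			n, err := strconv.ParseFloat(strings.TrimSuffix(s, u.suffix), 64)
			if err != nil {
				return minFreeSpace{}, err
			}
			return minFreeSpace{Bytes: uint64(n * float64(u.factor))}, nil
		}
	}
	return minFreeSpace{}, fmt.Errorf("unrecognized min free space value %q", s)
}

func main() {
	fmt.Println(parseMinFreeSpace("1"))     // {1 0} <nil>: one percent of the disk
	fmt.Println(parseMinFreeSpace("10GiB")) // {0 10737418240} <nil>: absolute bytes
}
```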
diff --git a/weed/filer/abstract_sql/abstract_sql_store.go b/weed/filer/abstract_sql/abstract_sql_store.go
index 120a1d139..ab8f6bcbd 100644
--- a/weed/filer/abstract_sql/abstract_sql_store.go
+++ b/weed/filer/abstract_sql/abstract_sql_store.go
@@ -276,7 +276,9 @@ func (store *AbstractSqlStore) DeleteFolderChildren(ctx context.Context, fullpat
 		}
 	}
 
-	res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), fullpath)
+	glog.V(4).Infof("delete %s SQL %s %d", string(shortPath), store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)))
+
+	res, err := db.ExecContext(ctx, store.GetSqlDeleteFolderChildren(bucket), util.HashStringToLong(string(shortPath)), string(shortPath))
 	if err != nil {
 		return fmt.Errorf("deleteFolderChildren %s: %s", fullpath, err)
 	}
diff --git a/weed/filer/filechunks.go b/weed/filer/filechunks.go
index 68f308a51..346eb3cfb 100644
--- a/weed/filer/filechunks.go
+++ b/weed/filer/filechunks.go
@@ -2,7 +2,6 @@ package filer
 
 import (
 	"bytes"
-	"encoding/hex"
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 	"math"
@@ -43,12 +42,11 @@ func ETagEntry(entry *Entry) (etag string) {
 
 func ETagChunks(chunks []*filer_pb.FileChunk) (etag string) {
 	if len(chunks) == 1 {
-		return chunks[0].ETag
+		return fmt.Sprintf("%x", util.Base64Md5ToBytes(chunks[0].ETag))
 	}
 	md5_digests := [][]byte{}
 	for _, c := range chunks {
-		md5_decoded, _ := hex.DecodeString(c.ETag)
-		md5_digests = append(md5_digests, md5_decoded)
+		md5_digests = append(md5_digests, util.Base64Md5ToBytes(c.ETag))
 	}
 	return fmt.Sprintf("%x-%d", util.Md5(bytes.Join(md5_digests, nil)), len(chunks))
 }
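The hunk above changes `ETagChunks` to treat chunk ETags as base64-encoded MD5 digests. A standalone sketch of the resulting rule, with `util.Base64Md5ToBytes` and `util.Md5` approximated by the standard library (an illustration of the convention, not the filer code):

```go
package main

import (
	"bytes"
	"crypto/md5"
	"encoding/base64"
	"fmt"
)

// etagFromBase64Md5s mirrors the convention above: a single chunk is reported as the
// hex form of its MD5; multiple chunks use the S3 multipart style
// md5(concatenated digests) followed by "-<number of chunks>".
func etagFromBase64Md5s(base64Md5s []string) string {
	if len(base64Md5s) == 1 {
		b, _ := base64.StdEncoding.DecodeString(base64Md5s[0])
		return fmt.Sprintf("%x", b)
	}
	var digests [][]byte
	for _, s := range base64Md5s {
		b, _ := base64.StdEncoding.DecodeString(s)
		digests = append(digests, b)
	}
	sum := md5.Sum(bytes.Join(digests, nil))
	return fmt.Sprintf("%x-%d", sum, len(base64Md5s))
}

func main() {
	part := md5.Sum([]byte("hello"))
	b64 := base64.StdEncoding.EncodeToString(part[:])
	fmt.Println(etagFromBase64Md5s([]string{b64}))      // plain hex MD5
	fmt.Println(etagFromBase64Md5s([]string{b64, b64})) // "<hex>-2", multipart style
}
```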
diff --git a/weed/filer/filer_buckets.go b/weed/filer/filer_buckets.go
index ba170f02e..43fb000c9 100644
--- a/weed/filer/filer_buckets.go
+++ b/weed/filer/filer_buckets.go
@@ -29,7 +29,7 @@ func (f *Filer) LoadBuckets() {
 
 	limit := int64(math.MaxInt32)
 
-	entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "")
+	entries, _, err := f.ListDirectoryEntries(context.Background(), util.FullPath(f.DirBucketsPath), "", false, limit, "", "", "")
 
 	if err != nil {
 		glog.V(1).Infof("no buckets found: %v", err)
diff --git a/weed/filer/filer_conf.go b/weed/filer/filer_conf.go
index 8e549f5ad..ab5afc5cc 100644
--- a/weed/filer/filer_conf.go
+++ b/weed/filer/filer_conf.go
@@ -18,6 +18,7 @@ const (
 	FilerConfName         = "filer.conf"
 	IamConfigDirecotry    = "/etc/iam"
 	IamIdentityFile       = "identity.json"
+	IamPoliciesFile       = "policies.json"
 )
 
 type FilerConf struct {
diff --git a/weed/filer/filer_delete_entry.go b/weed/filer/filer_delete_entry.go
index bedf2f4d1..3ef3cfff9 100644
--- a/weed/filer/filer_delete_entry.go
+++ b/weed/filer/filer_delete_entry.go
@@ -73,7 +73,7 @@ func (f *Filer) doBatchDeleteFolderMetaAndData(ctx context.Context, entry *Entry
 	includeLastFile := false
 	if !isDeletingBucket {
 		for {
-			entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "")
+			entries, _, err := f.ListDirectoryEntries(ctx, entry.FullPath, lastFileName, includeLastFile, PaginationSize, "", "", "")
 			if err != nil {
 				glog.Errorf("list folder %s: %v", entry.FullPath, err)
 				return nil, nil, fmt.Errorf("list folder %s: %v", entry.FullPath, err)
diff --git a/weed/filer/filer_notify.go b/weed/filer/filer_notify.go
index c461a82b8..7ab101102 100644
--- a/weed/filer/filer_notify.go
+++ b/weed/filer/filer_notify.go
@@ -116,13 +116,13 @@ func (f *Filer) ReadPersistedLogBuffer(startTime time.Time, eachLogEntryFn func(
 	sizeBuf := make([]byte, 4)
 	startTsNs := startTime.UnixNano()
 
-	dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "")
+	dayEntries, _, listDayErr := f.ListDirectoryEntries(context.Background(), SystemLogDir, startDate, true, 366, "", "", "")
 	if listDayErr != nil {
 		return lastTsNs, fmt.Errorf("fail to list log by day: %v", listDayErr)
 	}
 	for _, dayEntry := range dayEntries {
 		// println("checking day", dayEntry.FullPath)
-		hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "")
+		hourMinuteEntries, _, listHourMinuteErr := f.ListDirectoryEntries(context.Background(), util.NewFullPath(SystemLogDir, dayEntry.Name()), "", false, 24*60, "", "", "")
 		if listHourMinuteErr != nil {
 			return lastTsNs, fmt.Errorf("fail to list log %s by day: %v", dayEntry.Name(), listHourMinuteErr)
 		}
diff --git a/weed/filer/filer_on_meta_event.go b/weed/filer/filer_on_meta_event.go
index a91faeb24..c9f75a5ca 100644
--- a/weed/filer/filer_on_meta_event.go
+++ b/weed/filer/filer_on_meta_event.go
@@ -52,7 +52,7 @@ func (f *Filer) maybeReloadFilerConfiguration(event *filer_pb.SubscribeMetadataR
 
 func (f *Filer) readEntry(chunks []*filer_pb.FileChunk) ([]byte, error) {
 	var buf bytes.Buffer
-	err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64, false)
+	err := StreamContent(f.MasterClient, &buf, chunks, 0, math.MaxInt64)
 	if err != nil {
 		return nil, err
 	}
diff --git a/weed/filer/filer_search.go b/weed/filer/filer_search.go
index 0a14d3756..2ee29be25 100644
--- a/weed/filer/filer_search.go
+++ b/weed/filer/filer_search.go
@@ -20,9 +20,9 @@ func splitPattern(pattern string) (prefix string, restPattern string) {
 }
 
 // For now, prefix and namePattern are mutually exclusive
-func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string) (entries []*Entry, hasMore bool, err error) {
+func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string) (entries []*Entry, hasMore bool, err error) {
 
-	_, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, func(entry *Entry) bool {
+	_, err = f.StreamListDirectoryEntries(ctx, p, startFileName, inclusive, limit+1, prefix, namePattern, namePatternExclude, func(entry *Entry) bool {
 		entries = append(entries, entry)
 		return true
 	})
@@ -36,7 +36,7 @@ func (f *Filer) ListDirectoryEntries(ctx context.Context, p util.FullPath, start
 }
 
 // For now, prefix and namePattern are mutually exclusive
-func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
+func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix string, namePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (lastFileName string, err error) {
 	if strings.HasSuffix(string(p), "/") && len(p) > 1 {
 		p = p[0 : len(p)-1]
 	}
@@ -47,30 +47,38 @@ func (f *Filer) StreamListDirectoryEntries(ctx context.Context, p util.FullPath,
 	}
 	var missedCount int64
 
-	missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, eachEntryFunc)
+	missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, startFileName, inclusive, limit, prefix, restNamePattern, namePatternExclude, eachEntryFunc)
 
 	for missedCount > 0 && err == nil {
-		missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, eachEntryFunc)
+		missedCount, lastFileName, err = f.doListPatternMatchedEntries(ctx, p, lastFileName, false, missedCount, prefix, restNamePattern, namePatternExclude, eachEntryFunc)
 	}
 
 	return
 }
 
-func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) {
+func (f *Filer) doListPatternMatchedEntries(ctx context.Context, p util.FullPath, startFileName string, inclusive bool, limit int64, prefix, restNamePattern string, namePatternExclude string, eachEntryFunc ListEachEntryFunc) (missedCount int64, lastFileName string, err error) {
 
-	if len(restNamePattern) == 0 {
+	if len(restNamePattern) == 0 && len(namePatternExclude) == 0 {
 		lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, eachEntryFunc)
 		return 0, lastFileName, err
 	}
 
 	lastFileName, err = f.doListValidEntries(ctx, p, startFileName, inclusive, limit, prefix, func(entry *Entry) bool {
-		nameToTest := strings.ToLower(entry.Name())
-		if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && matched {
-			if !eachEntryFunc(entry) {
-				return false
+		nameToTest := entry.Name()
+		if len(namePatternExclude) > 0 {
+			if matched, matchErr := filepath.Match(namePatternExclude, nameToTest); matchErr == nil && matched {
+				missedCount++
+				return true
 			}
-		} else {
-			missedCount++
+		}
+		if len(restNamePattern) > 0 {
+			if matched, matchErr := filepath.Match(restNamePattern, nameToTest[len(prefix):]); matchErr == nil && !matched {
+				missedCount++
+				return true
+			}
+		}
+		if !eachEntryFunc(entry) {
+			return false
 		}
 		return true
 	})
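For illustration, the include/exclude filtering added above boils down to a small decision rule: skip an entry when it matches `namePatternExclude`, and, when a name pattern is given, also skip it when the portion after the shared prefix fails to match. The standalone sketch below restates that rule with `filepath.Match`; it is an approximation of the closure above, not the filer code itself (skipped entries are counted there so another page can be fetched).

```go
package main

import (
	"fmt"
	"path/filepath"
)

// keep reports whether a listed entry name survives the include/exclude patterns.
// prefix is the literal part split off the include pattern, restNamePattern the rest.
func keep(name, prefix, restNamePattern, namePatternExclude string) bool {
	if namePatternExclude != "" {
		if matched, err := filepath.Match(namePatternExclude, name); err == nil && matched {
			return false // explicitly excluded
		}
	}
	if restNamePattern != "" {
		if matched, err := filepath.Match(restNamePattern, name[len(prefix):]); err == nil && !matched {
			return false // include pattern present but not matched
		}
	}
	return true
}

func main() {
	fmt.Println(keep("report.tmp", "", "", "*.tmp"))           // false: excluded
	fmt.Println(keep("data_report.csv", "data_", "*.csv", "")) // true: "report.csv" matches "*.csv"
}
```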
diff --git a/weed/filer/filerstore_wrapper.go b/weed/filer/filerstore_wrapper.go
index 95848e61b..cd7c0bea3 100644
--- a/weed/filer/filerstore_wrapper.go
+++ b/weed/filer/filerstore_wrapper.go
@@ -149,8 +149,8 @@ func (fsw *FilerStoreWrapper) FindEntry(ctx context.Context, fp util.FullPath) (
 		stats.FilerStoreHistogram.WithLabelValues(actualStore.GetName(), "find").Observe(time.Since(start).Seconds())
 	}()
 
-	glog.V(4).Infof("FindEntry %s", fp)
 	entry, err = actualStore.FindEntry(ctx, fp)
+	glog.V(4).Infof("FindEntry %s: %v", fp, err)
 	if err != nil {
 		return nil, err
 	}
diff --git a/weed/filer/leveldb/leveldb_store_test.go b/weed/filer/leveldb/leveldb_store_test.go
index 9c342605e..d437895f5 100644
--- a/weed/filer/leveldb/leveldb_store_test.go
+++ b/weed/filer/leveldb/leveldb_store_test.go
@@ -51,14 +51,14 @@ func TestCreateAndFind(t *testing.T) {
 	}
 
 	// checking one upper directory
-	entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
+	entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
 	}
 
 	// checking one upper directory
-	entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+	entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
@@ -77,7 +77,7 @@ func TestEmptyRoot(t *testing.T) {
 	ctx := context.Background()
 
 	// checking one upper directory
-	entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+	entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
 	if err != nil {
 		t.Errorf("list entries: %v", err)
 		return
diff --git a/weed/filer/leveldb2/leveldb2_store_test.go b/weed/filer/leveldb2/leveldb2_store_test.go
index 495c73fdd..fd0ad18a3 100644
--- a/weed/filer/leveldb2/leveldb2_store_test.go
+++ b/weed/filer/leveldb2/leveldb2_store_test.go
@@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
 	}
 
 	// checking one upper directory
-	entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
+	entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
 	}
 
 	// checking one upper directory
-	entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+	entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
@@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) {
 	ctx := context.Background()
 
 	// checking one upper directory
-	entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+	entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
 	if err != nil {
 		t.Errorf("list entries: %v", err)
 		return
diff --git a/weed/filer/leveldb3/leveldb3_store_test.go b/weed/filer/leveldb3/leveldb3_store_test.go
index 53b0e927f..0b970a539 100644
--- a/weed/filer/leveldb3/leveldb3_store_test.go
+++ b/weed/filer/leveldb3/leveldb3_store_test.go
@@ -49,14 +49,14 @@ func TestCreateAndFind(t *testing.T) {
 	}
 
 	// checking one upper directory
-	entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
+	entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
 	}
 
 	// checking one upper directory
-	entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+	entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
@@ -75,7 +75,7 @@ func TestEmptyRoot(t *testing.T) {
 	ctx := context.Background()
 
 	// checking one upper directory
-	entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+	entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
 	if err != nil {
 		t.Errorf("list entries: %v", err)
 		return
diff --git a/weed/filer/read_write.go b/weed/filer/read_write.go
index 546af8094..c4c90fb63 100644
--- a/weed/filer/read_write.go
+++ b/weed/filer/read_write.go
@@ -27,7 +27,7 @@ func ReadEntry(masterClient *wdclient.MasterClient, filerClient filer_pb.Seaweed
 		return err
 	}
 
-	return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
+	return StreamContent(masterClient, byteBuffer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
 
 }
 
@@ -41,8 +41,12 @@ func ReadContent(filerAddress string, dir, name string) ([]byte, error) {
 }
 
 func SaveAs(host string, port int, dir, name string, contentType string, byteBuffer *bytes.Buffer) error {
-
-	target := fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name)
+	var target string
+	if port == 0 {
+		target = fmt.Sprintf("http://%s%s/%s", host, dir, name)
+	} else {
+		target = fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name)
+	}
 
 	// set the HTTP method, url, and request body
 	req, err := http.NewRequest(http.MethodPut, target, byteBuffer)
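
Note on the SaveAs change above: the ":port" suffix is only appended when a port is actually given, since the host string may already carry one. A minimal sketch of that URL construction (host names and paths here are made up):

```go
package main

import "fmt"

// buildTarget mirrors the port handling added to SaveAs: a zero port means the
// host string is used as-is instead of producing an invalid ":0" suffix.
func buildTarget(host string, port int, dir, name string) string {
	if port == 0 {
		return fmt.Sprintf("http://%s%s/%s", host, dir, name)
	}
	return fmt.Sprintf("http://%s:%d%s/%s", host, port, dir, name)
}

func main() {
	fmt.Println(buildTarget("filer.example.com:8888", 0, "/etc/iam", "identity.json"))
	fmt.Println(buildTarget("localhost", 8888, "/etc/iam", "identity.json"))
}
```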
diff --git a/weed/filer/reader_at.go b/weed/filer/reader_at.go
index a1e989684..b03b3bbb4 100644
--- a/weed/filer/reader_at.go
+++ b/weed/filer/reader_at.go
@@ -139,13 +139,15 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 		}
 		glog.V(4).Infof("read [%d,%d), %d/%d chunk %s [%d,%d)", chunkStart, chunkStop, i, len(c.chunkViews), chunk.FileId, chunk.LogicOffset-chunk.Offset, chunk.LogicOffset-chunk.Offset+int64(chunk.Size))
 		var buffer []byte
-		buffer, err = c.readFromWholeChunkData(chunk, nextChunk)
+		bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset
+		bufferLength := chunkStop - chunkStart
+		buffer, err = c.readChunkSlice(chunk, nextChunk, uint64(bufferOffset), uint64(bufferLength))
 		if err != nil {
 			glog.Errorf("fetching chunk %+v: %v\n", chunk, err)
 			return
 		}
-		bufferOffset := chunkStart - chunk.LogicOffset + chunk.Offset
-		copied := copy(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], buffer[bufferOffset:bufferOffset+chunkStop-chunkStart])
+
+		copied := copy(p[startOffset-offset:chunkStop-chunkStart+startOffset-offset], buffer)
 		n += copied
 		startOffset, remaining = startOffset+int64(copied), remaining-int64(copied)
 	}
@@ -167,6 +169,20 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 
 }
 
+func (c *ChunkReadAt) readChunkSlice(chunkView *ChunkView, nextChunkViews *ChunkView, offset, length uint64) ([]byte, error) {
+
+	chunkSlice := c.chunkCache.GetChunkSlice(chunkView.FileId, offset, length)
+	if len(chunkSlice) > 0 {
+		return chunkSlice, nil
+	}
+	chunkData, err := c.readFromWholeChunkData(chunkView, nextChunkViews)
+	if err != nil {
+		return nil, err
+	}
+	wanted := min(int64(length), int64(len(chunkData))-int64(offset))
+	return chunkData[offset : int64(offset)+wanted], nil
+}
+
 func (c *ChunkReadAt) readFromWholeChunkData(chunkView *ChunkView, nextChunkViews ...*ChunkView) (chunkData []byte, err error) {
 
 	if c.lastChunkFileId == chunkView.FileId {
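
Note on readChunkSlice above: it first asks the chunk cache for just the requested window and only falls back to fetching the whole chunk, which it then slices with the length clamped to what the chunk actually holds. The clamping logic in isolation (a sketch, not the cache code itself):

```go
package main

import "fmt"

// sliceChunk returns the [offset, offset+length) window of a whole chunk,
// clamping the length so a request past the end cannot slice out of range.
func sliceChunk(chunkData []byte, offset, length uint64) []byte {
	if offset >= uint64(len(chunkData)) {
		return nil
	}
	wanted := int64(length)
	if remaining := int64(len(chunkData)) - int64(offset); remaining < wanted {
		wanted = remaining
	}
	return chunkData[offset : int64(offset)+wanted]
}

func main() {
	data := []byte("0123456789")
	fmt.Printf("%s\n", sliceChunk(data, 4, 3))  // 456
	fmt.Printf("%s\n", sliceChunk(data, 8, 10)) // 89 (clamped at the chunk end)
}
```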
diff --git a/weed/filer/reader_at_test.go b/weed/filer/reader_at_test.go
index 37a34f4ea..f8e4727ce 100644
--- a/weed/filer/reader_at_test.go
+++ b/weed/filer/reader_at_test.go
@@ -20,6 +20,11 @@ func (m *mockChunkCache) GetChunk(fileId string, minSize uint64) (data []byte) {
 	}
 	return data
 }
+
+func (m *mockChunkCache) GetChunkSlice(fileId string, offset, length uint64) []byte {
+	return nil
+}
+
 func (m *mockChunkCache) SetChunk(fileId string, data []byte) {
 }
 
diff --git a/weed/filer/rocksdb/rocksdb_store_test.go b/weed/filer/rocksdb/rocksdb_store_test.go
index 439663524..f6e755b4b 100644
--- a/weed/filer/rocksdb/rocksdb_store_test.go
+++ b/weed/filer/rocksdb/rocksdb_store_test.go
@@ -53,14 +53,14 @@ func TestCreateAndFind(t *testing.T) {
 	}
 
 	// checking one upper directory
-	entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "")
+	entries, _, _ := testFiler.ListDirectoryEntries(ctx, util.FullPath("/home/chris/this/is/one"), "", false, 100, "", "", "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
 	}
 
 	// checking one upper directory
-	entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+	entries, _, _ = testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
 	if len(entries) != 1 {
 		t.Errorf("list entries count: %v", len(entries))
 		return
@@ -79,7 +79,7 @@ func TestEmptyRoot(t *testing.T) {
 	ctx := context.Background()
 
 	// checking one upper directory
-	entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "")
+	entries, _, err := testFiler.ListDirectoryEntries(ctx, util.FullPath("/"), "", false, 100, "", "", "")
 	if err != nil {
 		t.Errorf("list entries: %v", err)
 		return
diff --git a/weed/filer/stream.go b/weed/filer/stream.go
index 661a210ea..2c25b8722 100644
--- a/weed/filer/stream.go
+++ b/weed/filer/stream.go
@@ -3,7 +3,6 @@ package filer
 import (
 	"bytes"
 	"fmt"
-	"golang.org/x/sync/errgroup"
 	"io"
 	"math"
 	"strings"
@@ -14,7 +13,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/wdclient"
 )
 
-func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64, isCheck bool) error {
+func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, chunks []*filer_pb.FileChunk, offset int64, size int64) error {
 
 	glog.V(9).Infof("start to stream content for chunks: %+v\n", chunks)
 	chunkViews := ViewFromChunks(masterClient.GetLookupFileIdFunction(), chunks, offset, size)
@@ -34,17 +33,6 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
 		fileId2Url[chunkView.FileId] = urlStrings
 	}
 
-	if isCheck {
-		// Pre-check all chunkViews urls
-		gErr := new(errgroup.Group)
-		CheckAllChunkViews(chunkViews, &fileId2Url, gErr)
-		if err := gErr.Wait(); err != nil {
-			glog.Errorf("check all chunks: %v", err)
-			return fmt.Errorf("check all chunks: %v", err)
-		}
-		return nil
-	}
-
 	for _, chunkView := range chunkViews {
 
 		urlStrings := fileId2Url[chunkView.FileId]
@@ -53,7 +41,6 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
 			glog.Errorf("read chunk: %v", err)
 			return fmt.Errorf("read chunk: %v", err)
 		}
-
 		_, err = w.Write(data)
 		if err != nil {
 			glog.Errorf("write chunk: %v", err)
@@ -65,17 +52,6 @@ func StreamContent(masterClient wdclient.HasLookupFileIdFunction, w io.Writer, c
 
 }
 
-func CheckAllChunkViews(chunkViews []*ChunkView, fileId2Url *map[string][]string, gErr *errgroup.Group) {
-	for _, chunkView := range chunkViews {
-		urlStrings := (*fileId2Url)[chunkView.FileId]
-		glog.V(9).Infof("Check chunk: %+v\n url: %v", chunkView, urlStrings)
-		gErr.Go(func() error {
-			_, err := retriedFetchChunkData(urlStrings, chunkView.CipherKey, chunkView.IsGzipped, chunkView.IsFullChunk(), chunkView.Offset, int(chunkView.Size))
-			return err
-		})
-	}
-}
-
 // ----------------  ReadAllReader ----------------------------------
 
 func ReadAll(masterClient *wdclient.MasterClient, chunks []*filer_pb.FileChunk) ([]byte, error) {
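
Note on the StreamContent change above: with the isCheck flag removed, callers stream with the plain five-argument form. A hypothetical caller, assuming the seaweedfs packages shown in this diff are importable from the caller's module:

```go
package example

import (
	"math"
	"net/http"

	"github.com/chrislusf/seaweedfs/weed/filer"
	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
	"github.com/chrislusf/seaweedfs/weed/wdclient"
)

// streamWhole writes an entry's full content to an HTTP response using the
// new StreamContent signature from the hunk above.
func streamWhole(w http.ResponseWriter, lookup wdclient.HasLookupFileIdFunction, chunks []*filer_pb.FileChunk) error {
	return filer.StreamContent(lookup, w, chunks, 0, math.MaxInt64)
}
```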
diff --git a/weed/filesys/dir.go b/weed/filesys/dir.go
index 46457f858..09d5fd449 100644
--- a/weed/filesys/dir.go
+++ b/weed/filesys/dir.go
@@ -24,9 +24,12 @@ type Dir struct {
 	wfs    *WFS
 	entry  *filer_pb.Entry
 	parent *Dir
+	id     uint64
 }
 
 var _ = fs.Node(&Dir{})
+
+var _ = fs.NodeIdentifier(&Dir{})
 var _ = fs.NodeCreater(&Dir{})
 var _ = fs.NodeMknoder(&Dir{})
 var _ = fs.NodeMkdirer(&Dir{})
@@ -42,6 +45,13 @@ var _ = fs.NodeRemovexattrer(&Dir{})
 var _ = fs.NodeListxattrer(&Dir{})
 var _ = fs.NodeForgetter(&Dir{})
 
+func (dir *Dir) Id() uint64 {
+	if dir.parent == nil {
+		return 1
+	}
+	return dir.id
+}
+
 func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
 
 	// https://github.com/bazil/fuse/issues/196
@@ -53,17 +63,18 @@ func (dir *Dir) Attr(ctx context.Context, attr *fuse.Attr) error {
 		return nil
 	}
 
-	if err := dir.maybeLoadEntry(); err != nil {
+	entry, err := dir.maybeLoadEntry()
+	if err != nil {
 		glog.V(3).Infof("dir Attr %s,err: %+v", dir.FullPath(), err)
 		return err
 	}
 
-	// attr.Inode = util.FullPath(dir.FullPath()).AsInode()
-	attr.Mode = os.FileMode(dir.entry.Attributes.FileMode) | os.ModeDir
-	attr.Mtime = time.Unix(dir.entry.Attributes.Mtime, 0)
-	attr.Crtime = time.Unix(dir.entry.Attributes.Crtime, 0)
-	attr.Gid = dir.entry.Attributes.Gid
-	attr.Uid = dir.entry.Attributes.Uid
+	attr.Inode = dir.Id()
+	attr.Mode = os.FileMode(entry.Attributes.FileMode) | os.ModeDir
+	attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
+	attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
+	attr.Gid = entry.Attributes.Gid
+	attr.Uid = entry.Attributes.Uid
 
 	glog.V(4).Infof("dir Attr %s, attr: %+v", dir.FullPath(), attr)
 
@@ -74,16 +85,18 @@ func (dir *Dir) Getxattr(ctx context.Context, req *fuse.GetxattrRequest, resp *f
 
 	glog.V(4).Infof("dir Getxattr %s", dir.FullPath())
 
-	if err := dir.maybeLoadEntry(); err != nil {
+	entry, err := dir.maybeLoadEntry()
+	if err != nil {
 		return err
 	}
 
-	return getxattr(dir.entry, req, resp)
+	return getxattr(entry, req, resp)
 }
 
 func (dir *Dir) setRootDirAttributes(attr *fuse.Attr) {
 	// attr.Inode = 1 // filer2.FullPath(dir.Path).AsInode()
 	attr.Valid = time.Second
+	attr.Inode = dir.Id()
 	attr.Uid = dir.wfs.option.MountUid
 	attr.Gid = dir.wfs.option.MountGid
 	attr.Mode = dir.wfs.option.MountMode
@@ -102,73 +115,88 @@ func (dir *Dir) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
 	return nil
 }
 
-func (dir *Dir) newFile(name string, entry *filer_pb.Entry) fs.Node {
-	f := dir.wfs.fsNodeCache.EnsureFsNode(util.NewFullPath(dir.FullPath(), name), func() fs.Node {
-		return &File{
-			Name:           name,
-			dir:            dir,
-			wfs:            dir.wfs,
-			entry:          entry,
-			entryViewCache: nil,
-		}
-	})
-	f.(*File).dir = dir // in case dir node was created later
-	return f
+func (dir *Dir) newFile(name string) fs.Node {
+
+	fileFullPath := util.NewFullPath(dir.FullPath(), name)
+	fileId := fileFullPath.AsInode()
+	dir.wfs.handlesLock.Lock()
+	existingHandle, found := dir.wfs.handles[fileId]
+	dir.wfs.handlesLock.Unlock()
+
+	if found {
+		glog.V(4).Infof("newFile found opened file handle: %+v", fileFullPath)
+		return existingHandle.f
+	}
+	return &File{
+		Name: name,
+		dir:  dir,
+		wfs:  dir.wfs,
+		id:   fileId,
+	}
 }
 
-func (dir *Dir) newDirectory(fullpath util.FullPath, entry *filer_pb.Entry) fs.Node {
+func (dir *Dir) newDirectory(fullpath util.FullPath) fs.Node {
+
+	return &Dir{name: fullpath.Name(), wfs: dir.wfs, parent: dir, id: fullpath.AsInode()}
 
-	d := dir.wfs.fsNodeCache.EnsureFsNode(fullpath, func() fs.Node {
-		return &Dir{name: entry.Name, wfs: dir.wfs, entry: entry, parent: dir}
-	})
-	d.(*Dir).parent = dir // in case dir node was created later
-	return d
 }
 
 func (dir *Dir) Create(ctx context.Context, req *fuse.CreateRequest,
 	resp *fuse.CreateResponse) (fs.Node, fs.Handle, error) {
 
-	if dir.wfs.option.ReadOnly {
-		return nil, nil, fuse.EPERM
-	}
+	exclusive := req.Flags&fuse.OpenExclusive != 0
+	isDirectory := req.Mode&os.ModeDir > 0
 
-	request, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, req.Flags&fuse.OpenExclusive != 0)
-
-	if err != nil {
-		return nil, nil, err
+	if exclusive || isDirectory {
+		_, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, exclusive)
+		if err != nil {
+			return nil, nil, err
+		}
 	}
 	var node fs.Node
-	if request.Entry.IsDirectory {
-		node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), request.Entry)
+	if isDirectory {
+		node = dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name))
 		return node, nil, nil
 	}
 
-	node = dir.newFile(req.Name, request.Entry)
+	node = dir.newFile(req.Name)
 	file := node.(*File)
-	fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid)
+	file.entry = &filer_pb.Entry{
+		Name:        req.Name,
+		IsDirectory: req.Mode&os.ModeDir > 0,
+		Attributes: &filer_pb.FuseAttributes{
+			Mtime:       time.Now().Unix(),
+			Crtime:      time.Now().Unix(),
+			FileMode:    uint32(req.Mode &^ dir.wfs.option.Umask),
+			Uid:         req.Uid,
+			Gid:         req.Gid,
+			Collection:  dir.wfs.option.Collection,
+			Replication: dir.wfs.option.Replication,
+			TtlSec:      dir.wfs.option.TtlSec,
+		},
+	}
+	file.dirtyMetadata = true
+	fh := dir.wfs.AcquireHandle(file, req.Uid, req.Gid, req.Flags&fuse.OpenWriteOnly > 0)
 	return file, fh, nil
 
 }
 
 func (dir *Dir) Mknod(ctx context.Context, req *fuse.MknodRequest) (fs.Node, error) {
 
-	if dir.wfs.option.ReadOnly {
-		return nil, fuse.EPERM
-	}
-
-	request, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, false)
+	_, err := dir.doCreateEntry(req.Name, req.Mode, req.Uid, req.Gid, false)
 
 	if err != nil {
 		return nil, err
 	}
 	var node fs.Node
-	node = dir.newFile(req.Name, request.Entry)
+	node = dir.newFile(req.Name)
 	return node, nil
 }
 
-func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, exlusive bool) (*filer_pb.CreateEntryRequest, error) {
+func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, exclusive bool) (*filer_pb.CreateEntryRequest, error) {
+	dirFullPath := dir.FullPath()
 	request := &filer_pb.CreateEntryRequest{
-		Directory: dir.FullPath(),
+		Directory: dirFullPath,
 		Entry: &filer_pb.Entry{
 			Name:        name,
 			IsDirectory: mode&os.ModeDir > 0,
@@ -183,10 +211,10 @@ func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, ex
 				TtlSec:      dir.wfs.option.TtlSec,
 			},
 		},
-		OExcl:      exlusive,
+		OExcl:      exclusive,
 		Signatures: []int32{dir.wfs.signature},
 	}
-	glog.V(1).Infof("create %s/%s", dir.FullPath(), name)
+	glog.V(1).Infof("create %s/%s", dirFullPath, name)
 
 	err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
@@ -197,11 +225,14 @@ func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, ex
 			if strings.Contains(err.Error(), "EEXIST") {
 				return fuse.EEXIST
 			}
-			glog.V(0).Infof("create %s/%s: %v", dir.FullPath(), name, err)
+			glog.V(0).Infof("create %s/%s: %v", dirFullPath, name, err)
 			return fuse.EIO
 		}
 
-		dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
+		if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil {
+			glog.Errorf("local InsertEntry dir %s/%s: %v", dirFullPath, name, err)
+			return fuse.EIO
+		}
 
 		return nil
 	})
@@ -210,10 +241,6 @@ func (dir *Dir) doCreateEntry(name string, mode os.FileMode, uid, gid uint32, ex
 
 func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, error) {
 
-	if dir.wfs.option.ReadOnly {
-		return nil, fuse.EPERM
-	}
-
 	glog.V(4).Infof("mkdir %s: %s", dir.FullPath(), req.Name)
 
 	newEntry := &filer_pb.Entry{
@@ -228,35 +255,40 @@ func (dir *Dir) Mkdir(ctx context.Context, req *fuse.MkdirRequest) (fs.Node, err
 		},
 	}
 
+	dirFullPath := dir.FullPath()
+
 	err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
 		dir.wfs.mapPbIdFromLocalToFiler(newEntry)
 		defer dir.wfs.mapPbIdFromFilerToLocal(newEntry)
 
 		request := &filer_pb.CreateEntryRequest{
-			Directory:  dir.FullPath(),
+			Directory:  dirFullPath,
 			Entry:      newEntry,
 			Signatures: []int32{dir.wfs.signature},
 		}
 
 		glog.V(1).Infof("mkdir: %v", request)
 		if err := filer_pb.CreateEntry(client, request); err != nil {
-			glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
+			glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err)
 			return err
 		}
 
-		dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
+		if err := dir.wfs.metaCache.InsertEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil {
+			glog.Errorf("local mkdir dir %s/%s: %v", dirFullPath, req.Name, err)
+			return fuse.EIO
+		}
 
 		return nil
 	})
 
 	if err == nil {
-		node := dir.newDirectory(util.NewFullPath(dir.FullPath(), req.Name), newEntry)
+		node := dir.newDirectory(util.NewFullPath(dirFullPath, req.Name))
 
 		return node, nil
 	}
 
-	glog.V(0).Infof("mkdir %s/%s: %v", dir.FullPath(), req.Name, err)
+	glog.V(0).Infof("mkdir %s/%s: %v", dirFullPath, req.Name, err)
 
 	return nil, fuse.EIO
 }
@@ -272,40 +304,41 @@ func (dir *Dir) Lookup(ctx context.Context, req *fuse.LookupRequest, resp *fuse.
 		glog.Errorf("dir Lookup %s: %v", dirPath, visitErr)
 		return nil, fuse.EIO
 	}
-	cachedEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
+	localEntry, cacheErr := dir.wfs.metaCache.FindEntry(context.Background(), fullFilePath)
 	if cacheErr == filer_pb.ErrNotFound {
 		return nil, fuse.ENOENT
 	}
-	entry := cachedEntry.ToProtoEntry()
 
-	if entry == nil {
+	if localEntry == nil {
 		// glog.V(3).Infof("dir Lookup cache miss %s", fullFilePath)
-		entry, err = filer_pb.GetEntry(dir.wfs, fullFilePath)
+		entry, err := filer_pb.GetEntry(dir.wfs, fullFilePath)
 		if err != nil {
 			glog.V(1).Infof("dir GetEntry %s: %v", fullFilePath, err)
 			return nil, fuse.ENOENT
 		}
+		localEntry = filer.FromPbEntry(string(dirPath), entry)
 	} else {
 		glog.V(4).Infof("dir Lookup cache hit %s", fullFilePath)
 	}
 
-	if entry != nil {
-		if entry.IsDirectory {
-			node = dir.newDirectory(fullFilePath, entry)
+	if localEntry != nil {
+		if localEntry.IsDirectory() {
+			node = dir.newDirectory(fullFilePath)
 		} else {
-			node = dir.newFile(req.Name, entry)
+			node = dir.newFile(req.Name)
 		}
 
 		// resp.EntryValid = time.Second
-		// resp.Attr.Inode = fullFilePath.AsInode()
+		resp.Attr.Inode = fullFilePath.AsInode()
 		resp.Attr.Valid = time.Second
-		resp.Attr.Mtime = time.Unix(entry.Attributes.Mtime, 0)
-		resp.Attr.Crtime = time.Unix(entry.Attributes.Crtime, 0)
-		resp.Attr.Mode = os.FileMode(entry.Attributes.FileMode)
-		resp.Attr.Gid = entry.Attributes.Gid
-		resp.Attr.Uid = entry.Attributes.Uid
-		if entry.HardLinkCounter > 0 {
-			resp.Attr.Nlink = uint32(entry.HardLinkCounter)
+		resp.Attr.Size = localEntry.FileSize
+		resp.Attr.Mtime = localEntry.Attr.Mtime
+		resp.Attr.Crtime = localEntry.Attr.Crtime
+		resp.Attr.Mode = localEntry.Attr.Mode
+		resp.Attr.Gid = localEntry.Attr.Gid
+		resp.Attr.Uid = localEntry.Attr.Uid
+		if localEntry.HardLinkCounter > 0 {
+			resp.Attr.Nlink = uint32(localEntry.HardLinkCounter)
 		}
 
 		return node, nil
@@ -320,15 +353,14 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
 	dirPath := util.FullPath(dir.FullPath())
 	glog.V(4).Infof("dir ReadDirAll %s", dirPath)
 
-	processEachEntryFn := func(entry *filer_pb.Entry, isLast bool) error {
-		if entry.IsDirectory {
-			dirent := fuse.Dirent{Name: entry.Name, Type: fuse.DT_Dir}
+	processEachEntryFn := func(entry *filer.Entry, isLast bool) {
+		if entry.IsDirectory() {
+			dirent := fuse.Dirent{Name: entry.Name(), Type: fuse.DT_Dir, Inode: dirPath.Child(entry.Name()).AsInode()}
 			ret = append(ret, dirent)
 		} else {
-			dirent := fuse.Dirent{Name: entry.Name, Type: findFileType(uint16(entry.Attributes.FileMode))}
+			dirent := fuse.Dirent{Name: entry.Name(), Type: findFileType(uint16(entry.Attr.Mode)), Inode: dirPath.Child(entry.Name()).AsInode()}
 			ret = append(ret, dirent)
 		}
-		return nil
 	}
 
 	if err = meta_cache.EnsureVisited(dir.wfs.metaCache, dir.wfs, dirPath); err != nil {
@@ -336,7 +368,7 @@ func (dir *Dir) ReadDirAll(ctx context.Context) (ret []fuse.Dirent, err error) {
 		return nil, fuse.EIO
 	}
 	listErr := dir.wfs.metaCache.ListDirectoryEntries(context.Background(), dirPath, "", false, int64(math.MaxInt32), func(entry *filer.Entry) bool {
-		processEachEntryFn(entry.ToProtoEntry(), false)
+		processEachEntryFn(entry, false)
 		return true
 	})
 	if listErr != nil {
@@ -368,11 +400,6 @@ func findFileType(mode uint16) fuse.DirentType {
 
 func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
 
-	if dir.wfs.option.ReadOnly {
-		return fuse.EPERM
-	}
-
-
 	if !req.Dir {
 		return dir.removeOneFile(req)
 	}
@@ -383,40 +410,32 @@ func (dir *Dir) Remove(ctx context.Context, req *fuse.RemoveRequest) error {
 
 func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
 
-	filePath := util.NewFullPath(dir.FullPath(), req.Name)
+	dirFullPath := dir.FullPath()
+	filePath := util.NewFullPath(dirFullPath, req.Name)
 	entry, err := filer_pb.GetEntry(dir.wfs, filePath)
 	if err != nil {
 		return err
 	}
-	if entry == nil {
-		return nil
-	}
 
 	// first, ensure the filer store can correctly delete
 	glog.V(3).Infof("remove file: %v", req)
-	isDeleteData := entry.HardLinkCounter <= 1
-	err = filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature})
+	isDeleteData := entry != nil && entry.HardLinkCounter <= 1
+	err = filer_pb.Remove(dir.wfs, dirFullPath, req.Name, isDeleteData, false, false, false, []int32{dir.wfs.signature})
 	if err != nil {
-		glog.V(3).Infof("not found remove file %s/%s: %v", dir.FullPath(), req.Name, err)
+		glog.V(3).Infof("not found remove file %s: %v", filePath, err)
 		return fuse.ENOENT
 	}
 
 	// then, delete meta cache and fsNode cache
-	dir.wfs.metaCache.DeleteEntry(context.Background(), filePath)
-
-	// clear entry inside the file
-	fsNode := dir.wfs.fsNodeCache.GetFsNode(filePath)
-	dir.wfs.fsNodeCache.DeleteFsNode(filePath)
-	if fsNode != nil {
-		if file, ok := fsNode.(*File); ok {
-			file.clearEntry()
-		}
+	if err = dir.wfs.metaCache.DeleteEntry(context.Background(), filePath); err != nil {
+		glog.V(3).Infof("local DeleteEntry %s: %v", filePath, err)
+		return fuse.ESTALE
 	}
 
 	// remove current file handle if any
 	dir.wfs.handlesLock.Lock()
 	defer dir.wfs.handlesLock.Unlock()
-	inodeId := util.NewFullPath(dir.FullPath(), req.Name).AsInode()
+	inodeId := filePath.AsInode()
 	delete(dir.wfs.handles, inodeId)
 
 	return nil
@@ -425,20 +444,20 @@ func (dir *Dir) removeOneFile(req *fuse.RemoveRequest) error {
 
 func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
 
+	dirFullPath := dir.FullPath()
 	glog.V(3).Infof("remove directory entry: %v", req)
 	ignoreRecursiveErr := true // ignore recursion error since the OS should manage it
-	err := filer_pb.Remove(dir.wfs, dir.FullPath(), req.Name, true, false, ignoreRecursiveErr, false, []int32{dir.wfs.signature})
+	err := filer_pb.Remove(dir.wfs, dirFullPath, req.Name, true, true, ignoreRecursiveErr, false, []int32{dir.wfs.signature})
 	if err != nil {
-		glog.V(0).Infof("remove %s/%s: %v", dir.FullPath(), req.Name, err)
+		glog.V(0).Infof("remove %s/%s: %v", dirFullPath, req.Name, err)
 		if strings.Contains(err.Error(), "non-empty") {
 			return fuse.EEXIST
 		}
 		return fuse.ENOENT
 	}
 
-	t := util.NewFullPath(dir.FullPath(), req.Name)
+	t := util.NewFullPath(dirFullPath, req.Name)
 	dir.wfs.metaCache.DeleteEntry(context.Background(), t)
-	dir.wfs.fsNodeCache.DeleteFsNode(t)
 
 	return nil
 
@@ -446,73 +465,64 @@ func (dir *Dir) removeFolder(req *fuse.RemoveRequest) error {
 
 func (dir *Dir) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
 
-	if dir.wfs.option.ReadOnly {
-		return fuse.EPERM
-	}
-
 	glog.V(4).Infof("%v dir setattr %+v", dir.FullPath(), req)
 
-	if err := dir.maybeLoadEntry(); err != nil {
+	entry, err := dir.maybeLoadEntry()
+	if err != nil {
 		return err
 	}
 
 	if req.Valid.Mode() {
-		dir.entry.Attributes.FileMode = uint32(req.Mode)
+		entry.Attributes.FileMode = uint32(req.Mode)
 	}
 
 	if req.Valid.Uid() {
-		dir.entry.Attributes.Uid = req.Uid
+		entry.Attributes.Uid = req.Uid
 	}
 
 	if req.Valid.Gid() {
-		dir.entry.Attributes.Gid = req.Gid
+		entry.Attributes.Gid = req.Gid
 	}
 
 	if req.Valid.Mtime() {
-		dir.entry.Attributes.Mtime = req.Mtime.Unix()
+		entry.Attributes.Mtime = req.Mtime.Unix()
 	}
 
-	return dir.saveEntry()
+	return dir.saveEntry(entry)
 
 }
 
 func (dir *Dir) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
 
-	if dir.wfs.option.ReadOnly {
-		return fuse.EPERM
-	}
-
 	glog.V(4).Infof("dir Setxattr %s: %s", dir.FullPath(), req.Name)
 
-	if err := dir.maybeLoadEntry(); err != nil {
+	entry, err := dir.maybeLoadEntry()
+	if err != nil {
 		return err
 	}
 
-	if err := setxattr(dir.entry, req); err != nil {
+	if err := setxattr(entry, req); err != nil {
 		return err
 	}
 
-	return dir.saveEntry()
+	return dir.saveEntry(entry)
 
 }
 
 func (dir *Dir) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
 
-	if dir.wfs.option.ReadOnly {
-		return fuse.EPERM
-	}
-
 	glog.V(4).Infof("dir Removexattr %s: %s", dir.FullPath(), req.Name)
 
-	if err := dir.maybeLoadEntry(); err != nil {
+	entry, err := dir.maybeLoadEntry()
+	if err != nil {
 		return err
 	}
 
-	if err := removexattr(dir.entry, req); err != nil {
+	if err := removexattr(entry, req); err != nil {
 		return err
 	}
 
-	return dir.saveEntry()
+	return dir.saveEntry(entry)
 
 }
 
@@ -520,11 +530,12 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
 
 	glog.V(4).Infof("dir Listxattr %s", dir.FullPath())
 
-	if err := dir.maybeLoadEntry(); err != nil {
+	entry, err := dir.maybeLoadEntry()
+	if err != nil {
 		return err
 	}
 
-	if err := listxattr(dir.entry, req, resp); err != nil {
+	if err := listxattr(entry, req, resp); err != nil {
 		return err
 	}
 
@@ -534,34 +545,25 @@ func (dir *Dir) Listxattr(ctx context.Context, req *fuse.ListxattrRequest, resp
 
 func (dir *Dir) Forget() {
 	glog.V(4).Infof("Forget dir %s", dir.FullPath())
-
-	dir.wfs.fsNodeCache.DeleteFsNode(util.FullPath(dir.FullPath()))
 }
 
-func (dir *Dir) maybeLoadEntry() error {
-	if dir.entry == nil {
-		parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName()
-		entry, err := dir.wfs.maybeLoadEntry(parentDirPath, name)
-		if err != nil {
-			return err
-		}
-		dir.entry = entry
-	}
-	return nil
+func (dir *Dir) maybeLoadEntry() (*filer_pb.Entry, error) {
+	parentDirPath, name := util.FullPath(dir.FullPath()).DirAndName()
+	return dir.wfs.maybeLoadEntry(parentDirPath, name)
 }
 
-func (dir *Dir) saveEntry() error {
+func (dir *Dir) saveEntry(entry *filer_pb.Entry) error {
 
 	parentDir, name := util.FullPath(dir.FullPath()).DirAndName()
 
 	return dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
-		dir.wfs.mapPbIdFromLocalToFiler(dir.entry)
-		defer dir.wfs.mapPbIdFromFilerToLocal(dir.entry)
+		dir.wfs.mapPbIdFromLocalToFiler(entry)
+		defer dir.wfs.mapPbIdFromFilerToLocal(entry)
 
 		request := &filer_pb.UpdateEntryRequest{
 			Directory:  parentDir,
-			Entry:      dir.entry,
+			Entry:      entry,
 			Signatures: []int32{dir.wfs.signature},
 		}
 
@@ -572,7 +574,10 @@ func (dir *Dir) saveEntry() error {
 			return fuse.EIO
 		}
 
-		dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry))
+		if err := dir.wfs.metaCache.UpdateEntry(context.Background(), filer.FromPbEntry(request.Directory, request.Entry)); err != nil {
+			glog.Errorf("UpdateEntry dir %s/%s: %v", parentDir, name, err)
+			return fuse.ESTALE
+		}
 
 		return nil
 	})
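
Note on the dir.go changes above: every FUSE node now gets a stable inode derived from its full path (util.FullPath.AsInode), so a node can be reconstructed from the path alone instead of being held in fsNodeCache. The exact hash lives in weed/util; the FNV-1a sketch below only illustrates the idea of a deterministic path-to-uint64 mapping:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// asInode demonstrates a deterministic path -> inode mapping: the same path
// always yields the same 64-bit id, so lookups, renames and cache invalidation
// can agree on a node without sharing an in-memory node cache.
func asInode(fullPath string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(fullPath))
	return h.Sum64()
}

func main() {
	a := asInode("/home/chris/this/is/one/file.txt")
	b := asInode("/home/chris/this/is/one/file.txt")
	fmt.Println(a == b) // true: stable across independent lookups
}
```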
diff --git a/weed/filesys/dir_link.go b/weed/filesys/dir_link.go
index 6266e492d..acdcd2de4 100644
--- a/weed/filesys/dir_link.go
+++ b/weed/filesys/dir_link.go
@@ -24,10 +24,6 @@ const (
 
 func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (fs.Node, error) {
 
-	if dir.wfs.option.ReadOnly {
-		return nil, fuse.EPERM
-	}
-
 	oldFile, ok := old.(*File)
 	if !ok {
 		glog.Errorf("old node is not a file: %+v", old)
@@ -35,11 +31,11 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
 
 	glog.V(4).Infof("Link: %v/%v -> %v/%v", oldFile.dir.FullPath(), oldFile.Name, dir.FullPath(), req.NewName)
 
-	if _, err := oldFile.maybeLoadEntry(ctx); err != nil {
+	oldEntry, err := oldFile.maybeLoadEntry(ctx)
+	if err != nil {
 		return nil, err
 	}
 
-	oldEntry := oldFile.getEntry()
 	if oldEntry == nil {
 		return nil, fuse.EIO
 	}
@@ -72,7 +68,7 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
 	}
 
 	// apply changes to the filer, and also apply to local metaCache
-	err := dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+	err = dir.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
 		dir.wfs.mapPbIdFromLocalToFiler(request.Entry)
 		defer dir.wfs.mapPbIdFromFilerToLocal(request.Entry)
@@ -97,11 +93,8 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
 	}
 
 	// create new file node
-	newNode := dir.newFile(req.NewName, request.Entry)
+	newNode := dir.newFile(req.NewName)
 	newFile := newNode.(*File)
-	if _, err := newFile.maybeLoadEntry(ctx); err != nil {
-		return nil, err
-	}
 
 	return newFile, err
 
@@ -109,10 +102,6 @@ func (dir *Dir) Link(ctx context.Context, req *fuse.LinkRequest, old fs.Node) (f
 
 func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node, error) {
 
-	if dir.wfs.option.ReadOnly {
-		return nil, fuse.EPERM
-	}
-
 	glog.V(4).Infof("Symlink: %v/%v to %v", dir.FullPath(), req.NewName, req.Target)
 
 	request := &filer_pb.CreateEntryRequest{
@@ -147,7 +136,7 @@ func (dir *Dir) Symlink(ctx context.Context, req *fuse.SymlinkRequest) (fs.Node,
 		return nil
 	})
 
-	symlink := dir.newFile(req.NewName, request.Entry)
+	symlink := dir.newFile(req.NewName)
 
 	return symlink, err
 
diff --git a/weed/filesys/dir_rename.go b/weed/filesys/dir_rename.go
index 28316c3bd..b07710d17 100644
--- a/weed/filesys/dir_rename.go
+++ b/weed/filesys/dir_rename.go
@@ -13,10 +13,6 @@ import (
 
 func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirectory fs.Node) error {
 
-	if dir.wfs.option.ReadOnly {
-		return fuse.EPERM
-	}
-
 	newDir := newDirectory.(*Dir)
 
 	newPath := util.NewFullPath(newDir.FullPath(), req.NewName)
@@ -68,19 +64,28 @@ func (dir *Dir) Rename(ctx context.Context, req *fuse.RenameRequest, newDirector
 		return fuse.EIO
 	}
 
-	// fmt.Printf("rename path: %v => %v\n", oldPath, newPath)
-	dir.wfs.fsNodeCache.Move(oldPath, newPath)
+	oldFsNode := NodeWithId(oldPath.AsInode())
+	newFsNode := NodeWithId(newPath.AsInode())
+	dir.wfs.Server.InvalidateInternalNode(oldFsNode, newFsNode, func(internalNode fs.Node) {
+		if file, ok := internalNode.(*File); ok {
+			glog.V(4).Infof("internal node %s", file.Name)
+			file.Name = req.NewName
+			file.id = uint64(newFsNode)
+		}
+	})
 
 	// change file handle
 	dir.wfs.handlesLock.Lock()
 	defer dir.wfs.handlesLock.Unlock()
 	inodeId := oldPath.AsInode()
 	existingHandle, found := dir.wfs.handles[inodeId]
+	glog.V(4).Infof("has open filehandle %s: %v", oldPath, found)
 	if !found || existingHandle == nil {
-		return err
+		return nil
 	}
+	glog.V(4).Infof("opened filehandle %s => %s", oldPath, newPath)
 	delete(dir.wfs.handles, inodeId)
 	dir.wfs.handles[newPath.AsInode()] = existingHandle
 
-	return err
+	return nil
 }
diff --git a/weed/filesys/dirty_page.go b/weed/filesys/dirty_page.go
index 8888cff96..1719d68e6 100644
--- a/weed/filesys/dirty_page.go
+++ b/weed/filesys/dirty_page.go
@@ -13,6 +13,7 @@ import (
 type ContinuousDirtyPages struct {
 	intervals      *ContinuousIntervals
 	f              *File
+	fh             *FileHandle
 	writeWaitGroup sync.WaitGroup
 	chunkAddLock   sync.Mutex
 	lastErr        error
@@ -94,7 +95,7 @@ func (pages *ContinuousDirtyPages) saveToStorage(reader io.Reader, offset int64,
 		defer pages.writeWaitGroup.Done()
 
 		reader = io.LimitReader(reader, size)
-		chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath())(reader, pages.f.Name, offset)
+		chunk, collection, replication, err := pages.f.wfs.saveDataAsChunk(pages.f.fullpath(), pages.fh.writeOnly)(reader, pages.f.Name, offset)
 		if err != nil {
 			glog.V(0).Infof("%s saveToStorage [%d,%d): %v", pages.f.fullpath(), offset, offset+size, err)
 			pages.lastErr = err
diff --git a/weed/filesys/file.go b/weed/filesys/file.go
index 2433be590..122aeeef4 100644
--- a/weed/filesys/file.go
+++ b/weed/filesys/file.go
@@ -2,10 +2,8 @@ package filesys
 
 import (
 	"context"
-	"io"
 	"os"
 	"sort"
-	"sync"
 	"time"
 
 	"github.com/seaweedfs/fuse"
@@ -20,6 +18,7 @@ import (
 const blockSize = 512
 
 var _ = fs.Node(&File{})
+var _ = fs.NodeIdentifier(&File{})
 var _ = fs.NodeOpener(&File{})
 var _ = fs.NodeFsyncer(&File{})
 var _ = fs.NodeSetattrer(&File{})
@@ -30,37 +29,37 @@ var _ = fs.NodeListxattrer(&File{})
 var _ = fs.NodeForgetter(&File{})
 
 type File struct {
-	Name           string
-	dir            *Dir
-	wfs            *WFS
-	entry          *filer_pb.Entry
-	entryLock      sync.RWMutex
-	entryViewCache []filer.VisibleInterval
-	isOpen         int
-	reader         io.ReaderAt
-	dirtyMetadata  bool
+	Name          string
+	dir           *Dir
+	wfs           *WFS
+	entry         *filer_pb.Entry
+	isOpen        int
+	dirtyMetadata bool
+	id            uint64
 }
 
 func (file *File) fullpath() util.FullPath {
 	return util.NewFullPath(file.dir.FullPath(), file.Name)
 }
 
+func (file *File) Id() uint64 {
+	return file.id
+}
+
 func (file *File) Attr(ctx context.Context, attr *fuse.Attr) (err error) {
 
 	glog.V(4).Infof("file Attr %s, open:%v existing:%v", file.fullpath(), file.isOpen, attr)
 
-	entry := file.getEntry()
-	if file.isOpen <= 0 || entry == nil {
-		if entry, err = file.maybeLoadEntry(ctx); err != nil {
-			return err
-		}
+	entry, err := file.maybeLoadEntry(ctx)
+	if err != nil {
+		return err
 	}
 
 	if entry == nil {
 		return fuse.ENOENT
 	}
 
-	// attr.Inode = file.fullpath().AsInode()
+	attr.Inode = file.Id()
 	attr.Valid = time.Second
 	attr.Mode = os.FileMode(entry.Attributes.FileMode)
 	attr.Size = filer.FileSize(entry)
@@ -98,7 +97,7 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
 
 	glog.V(4).Infof("file %v open %+v", file.fullpath(), req)
 
-	handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid)
+	handle := file.wfs.AcquireHandle(file, req.Uid, req.Gid, req.Flags&fuse.OpenWriteOnly > 0)
 
 	resp.Handle = fuse.HandleID(handle.handle)
 
@@ -110,10 +109,6 @@ func (file *File) Open(ctx context.Context, req *fuse.OpenRequest, resp *fuse.Op
 
 func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *fuse.SetattrResponse) error {
 
-	if file.wfs.option.ReadOnly {
-		return fuse.EPERM
-	}
-
 	glog.V(4).Infof("%v file setattr %+v", file.fullpath(), req)
 
 	entry, err := file.maybeLoadEntry(ctx)
@@ -122,7 +117,7 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
 	}
 	if file.isOpen > 0 {
 		file.wfs.handlesLock.Lock()
-		fileHandle := file.wfs.handles[file.fullpath().AsInode()]
+		fileHandle := file.wfs.handles[file.Id()]
 		file.wfs.handlesLock.Unlock()
 
 		if fileHandle != nil {
@@ -154,8 +149,6 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
 				}
 			}
 			entry.Chunks = chunks
-			file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(file.wfs.LookupFn(), chunks)
-			file.setReader(nil)
 		}
 		entry.Attributes.FileSize = req.Size
 		file.dirtyMetadata = true
@@ -204,10 +197,6 @@ func (file *File) Setattr(ctx context.Context, req *fuse.SetattrRequest, resp *f
 
 func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error {
 
-	if file.wfs.option.ReadOnly {
-		return fuse.EPERM
-	}
-
 	glog.V(4).Infof("file Setxattr %s: %s", file.fullpath(), req.Name)
 
 	entry, err := file.maybeLoadEntry(ctx)
@@ -225,10 +214,6 @@ func (file *File) Setxattr(ctx context.Context, req *fuse.SetxattrRequest) error
 
 func (file *File) Removexattr(ctx context.Context, req *fuse.RemovexattrRequest) error {
 
-	if file.wfs.option.ReadOnly {
-		return fuse.EPERM
-	}
-
 	glog.V(4).Infof("file Removexattr %s: %s", file.fullpath(), req.Name)
 
 	entry, err := file.maybeLoadEntry(ctx)
@@ -272,16 +257,20 @@ func (file *File) Fsync(ctx context.Context, req *fuse.FsyncRequest) error {
 func (file *File) Forget() {
 	t := util.NewFullPath(file.dir.FullPath(), file.Name)
 	glog.V(4).Infof("Forget file %s", t)
-	file.wfs.fsNodeCache.DeleteFsNode(t)
-	file.wfs.ReleaseHandle(t, 0)
-	file.setReader(nil)
+	file.wfs.ReleaseHandle(t, fuse.HandleID(t.AsInode()))
 }
 
 func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, err error) {
-	entry = file.getEntry()
-	if file.isOpen > 0 {
-		return entry, nil
+
+	file.wfs.handlesLock.Lock()
+	handle, found := file.wfs.handles[file.Id()]
+	file.wfs.handlesLock.Unlock()
+	entry = file.entry
+	if found {
+		glog.V(4).Infof("maybeLoadEntry found opened file %s/%s", file.dir.FullPath(), file.Name)
+		entry = handle.f.entry
 	}
+
 	if entry != nil {
 		if len(entry.HardLinkId) == 0 {
 			// only always reload hard link
@@ -294,7 +283,7 @@ func (file *File) maybeLoadEntry(ctx context.Context) (entry *filer_pb.Entry, er
 		return entry, err
 	}
 	if entry != nil {
-		file.setEntry(entry)
+		// file.entry = entry
 	} else {
 		glog.Warningf("maybeLoadEntry not found entry %s/%s: %v", file.dir.FullPath(), file.Name, err)
 	}
@@ -336,44 +325,11 @@ func (file *File) addChunks(chunks []*filer_pb.FileChunk) {
 		return lessThan(chunks[i], chunks[j])
 	})
 
-	// add to entry view cache
-	for _, chunk := range chunks {
-		file.entryViewCache = filer.MergeIntoVisibles(file.entryViewCache, chunk)
-	}
-
-	file.setReader(nil)
-
 	glog.V(4).Infof("%s existing %d chunks adds %d more", file.fullpath(), len(entry.Chunks), len(chunks))
 
 	entry.Chunks = append(entry.Chunks, newChunks...)
 }
 
-func (file *File) setReader(reader io.ReaderAt) {
-	r := file.reader
-	if r != nil {
-		if closer, ok := r.(io.Closer); ok {
-			closer.Close()
-		}
-	}
-	file.reader = reader
-}
-
-func (file *File) setEntry(entry *filer_pb.Entry) {
-	file.entryLock.Lock()
-	defer file.entryLock.Unlock()
-	file.entry = entry
-	file.entryViewCache, _ = filer.NonOverlappingVisibleIntervals(file.wfs.LookupFn(), entry.Chunks)
-	file.setReader(nil)
-}
-
-func (file *File) clearEntry() {
-	file.entryLock.Lock()
-	defer file.entryLock.Unlock()
-	file.entry = nil
-	file.entryViewCache = nil
-	file.setReader(nil)
-}
-
 func (file *File) saveEntry(entry *filer_pb.Entry) error {
 	return file.wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
@@ -400,7 +356,5 @@ func (file *File) saveEntry(entry *filer_pb.Entry) error {
 }
 
 func (file *File) getEntry() *filer_pb.Entry {
-	file.entryLock.RLock()
-	defer file.entryLock.RUnlock()
 	return file.entry
 }
diff --git a/weed/filesys/filehandle.go b/weed/filesys/filehandle.go
index 4419888c4..8cbaf6fd2 100644
--- a/weed/filesys/filehandle.go
+++ b/weed/filesys/filehandle.go
@@ -20,9 +20,11 @@ import (
 
 type FileHandle struct {
 	// cache file has been written to
-	dirtyPages  *ContinuousDirtyPages
-	contentType string
-	handle      uint64
+	dirtyPages     *ContinuousDirtyPages
+	entryViewCache []filer.VisibleInterval
+	reader         io.ReaderAt
+	contentType    string
+	handle         uint64
 	sync.Mutex
 
 	f         *File
@@ -30,7 +32,7 @@ type FileHandle struct {
 	NodeId    fuse.NodeID    // file or directory the request is about
 	Uid       uint32         // user ID of process making request
 	Gid       uint32         // group ID of process making request
-
+	writeOnly bool
 }
 
 func newFileHandle(file *File, uid, gid uint32) *FileHandle {
@@ -40,6 +42,7 @@ func newFileHandle(file *File, uid, gid uint32) *FileHandle {
 		Uid:        uid,
 		Gid:        gid,
 	}
+	fh.dirtyPages.fh = fh
 	entry := fh.f.getEntry()
 	if entry != nil {
 		entry.Attributes.FileSize = filer.FileSize(entry)
@@ -125,20 +128,20 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 	}
 
 	var chunkResolveErr error
-	if fh.f.entryViewCache == nil {
-		fh.f.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks)
+	if fh.entryViewCache == nil {
+		fh.entryViewCache, chunkResolveErr = filer.NonOverlappingVisibleIntervals(fh.f.wfs.LookupFn(), entry.Chunks)
 		if chunkResolveErr != nil {
 			return 0, fmt.Errorf("fail to resolve chunk manifest: %v", chunkResolveErr)
 		}
-		fh.f.setReader(nil)
+		fh.reader = nil
 	}
 
-	reader := fh.f.reader
+	reader := fh.reader
 	if reader == nil {
-		chunkViews := filer.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt64)
+		chunkViews := filer.ViewFromVisibleIntervals(fh.entryViewCache, 0, math.MaxInt64)
 		reader = filer.NewChunkReaderAtFromClient(fh.f.wfs.LookupFn(), chunkViews, fh.f.wfs.chunkCache, fileSize)
 	}
-	fh.f.setReader(reader)
+	fh.reader = reader
 
 	totalRead, err := reader.ReadAt(buff, offset)
 
@@ -154,10 +157,6 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 // Write to the file handle
 func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *fuse.WriteResponse) error {
 
-	if fh.f.wfs.option.ReadOnly {
-		return fuse.EPERM
-	}
-
 	fh.Lock()
 	defer fh.Unlock()
 
@@ -195,25 +194,27 @@ func (fh *FileHandle) Write(ctx context.Context, req *fuse.WriteRequest, resp *f
 
 func (fh *FileHandle) Release(ctx context.Context, req *fuse.ReleaseRequest) error {
 
-	glog.V(4).Infof("Release %v fh %d", fh.f.fullpath(), fh.handle)
+	glog.V(4).Infof("Release %v fh %d open=%d", fh.f.fullpath(), fh.handle, fh.f.isOpen)
 
 	fh.Lock()
 	defer fh.Unlock()
 
+	fh.f.isOpen--
+
 	if fh.f.isOpen <= 0 {
+		fh.f.entry = nil
+		fh.entryViewCache = nil
+		fh.reader = nil
+
+		fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
+	}
+
+	if fh.f.isOpen < 0 {
 		glog.V(0).Infof("Release reset %s open count %d => %d", fh.f.Name, fh.f.isOpen, 0)
 		fh.f.isOpen = 0
 		return nil
 	}
 
-	if fh.f.isOpen == 1 {
-
-		fh.f.isOpen--
-
-		fh.f.wfs.ReleaseHandle(fh.f.fullpath(), fuse.HandleID(fh.handle))
-		fh.f.setReader(nil)
-	}
-
 	return nil
 }
 
@@ -289,7 +290,7 @@ func (fh *FileHandle) doFlush(ctx context.Context, header fuse.Header) error {
 		manifestChunks, nonManifestChunks := filer.SeparateManifestChunks(entry.Chunks)
 
 		chunks, _ := filer.CompactFileChunks(fh.f.wfs.LookupFn(), nonManifestChunks)
-		chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath()), chunks)
+		chunks, manifestErr := filer.MaybeManifestize(fh.f.wfs.saveDataAsChunk(fh.f.fullpath(), fh.writeOnly), chunks)
 		if manifestErr != nil {
 			// not good, but should be ok
 			glog.V(0).Infof("MaybeManifestize: %v", manifestErr)
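
Note on the Release change above: the per-file open count is now decremented on every Release, and the cached entry, view cache and reader are dropped only when the last handle goes away. The bookkeeping in isolation (names and the cached field are stand-ins):

```go
package main

import (
	"fmt"
	"sync"
)

// handle models the open-count logic from the Release hunk: acquire on Open,
// release on Release, and clear cached state only when no opener remains.
type handle struct {
	mu     sync.Mutex
	isOpen int
	cached []byte // stands in for entry, entryViewCache and reader
}

func (h *handle) acquire() {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.isOpen++
}

func (h *handle) release() {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.isOpen--
	if h.isOpen <= 0 {
		h.cached = nil // last release: drop cached state
	}
	if h.isOpen < 0 {
		h.isOpen = 0 // defensive reset, as in the hunk above
	}
}

func main() {
	h := &handle{cached: []byte("entry")}
	h.acquire()
	h.acquire()
	h.release()
	fmt.Println(h.cached != nil) // true: one opener still holds the handle
	h.release()
	fmt.Println(h.cached != nil) // false: cleared on the final release
}
```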
diff --git a/weed/filesys/meta_cache/meta_cache.go b/weed/filesys/meta_cache/meta_cache.go
index b9d4724c9..3a64df018 100644
--- a/weed/filesys/meta_cache/meta_cache.go
+++ b/weed/filesys/meta_cache/meta_cache.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 	"os"
-	"strings"
 	"sync"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
@@ -31,9 +30,6 @@ func NewMetaCache(dbFolder string, baseDir util.FullPath, uidGidMapper *UidGidMa
 		visitedBoundary: bounded_tree.NewBoundedTree(baseDir),
 		uidGidMapper:    uidGidMapper,
 		invalidateFunc: func(fullpath util.FullPath) {
-			if baseDir != "/" && strings.HasPrefix(string(fullpath), string(baseDir)) {
-				fullpath = fullpath[len(baseDir):]
-			}
 			invalidateFunc(fullpath)
 		},
 	}
diff --git a/weed/filesys/meta_cache/meta_cache_init.go b/weed/filesys/meta_cache/meta_cache_init.go
index 1ca3b16d5..9af25ae29 100644
--- a/weed/filesys/meta_cache/meta_cache_init.go
+++ b/weed/filesys/meta_cache/meta_cache_init.go
@@ -17,9 +17,9 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
 		glog.V(4).Infof("ReadDirAllEntries %s ...", path)
 
 		util.Retry("ReadDirAllEntries", func() error {
-			err = filer_pb.ReadDirAllEntries(client, dirPath, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
-				entry := filer.FromPbEntry(string(dirPath), pbEntry)
-				if IsHiddenSystemEntry(string(dirPath), entry.Name()) {
+			err = filer_pb.ReadDirAllEntries(client, path, "", func(pbEntry *filer_pb.Entry, isLast bool) error {
+				entry := filer.FromPbEntry(string(path), pbEntry)
+				if IsHiddenSystemEntry(string(path), entry.Name()) {
 					return nil
 				}
 				if err := mc.doInsertEntry(context.Background(), entry); err != nil {
@@ -35,7 +35,7 @@ func EnsureVisited(mc *MetaCache, client filer_pb.FilerClient, dirPath util.Full
 		})
 
 		if err != nil {
-			err = fmt.Errorf("list %s: %v", dirPath, err)
+			err = fmt.Errorf("list %s: %v", path, err)
 		}
 
 		return
diff --git a/weed/filesys/wfs.go b/weed/filesys/wfs.go
index ba5eb4b6b..fcce7d9cc 100644
--- a/weed/filesys/wfs.go
+++ b/weed/filesys/wfs.go
@@ -41,9 +41,7 @@ type Option struct {
 	CacheDir           string
 	CacheSizeMB        int64
 	DataCenter         string
-	EntryCacheTtl      time.Duration
 	Umask              os.FileMode
-	ReadOnly           bool
 
 	MountUid   uint32
 	MountGid   uint32
@@ -105,24 +103,19 @@ func NewSeaweedFileSystem(option *Option) *WFS {
 	}
 
 	wfs.metaCache = meta_cache.NewMetaCache(path.Join(cacheDir, "meta"), util.FullPath(option.FilerMountRootPath), option.UidGidMapper, func(filePath util.FullPath) {
-		fsNode := wfs.fsNodeCache.GetFsNode(filePath)
-		if fsNode != nil {
-			if file, ok := fsNode.(*File); ok {
-				if err := wfs.Server.InvalidateNodeData(file); err != nil {
-					glog.V(4).Infof("InvalidateNodeData %s : %v", filePath, err)
-				}
-				file.clearEntry()
-			}
+
+		fsNode := NodeWithId(filePath.AsInode())
+		if err := wfs.Server.InvalidateNodeData(fsNode); err != nil {
+			glog.V(4).Infof("InvalidateNodeData %s : %v", filePath, err)
 		}
+
 		dir, name := filePath.DirAndName()
-		parent := wfs.root
-		if dir != "/" {
-			parent = wfs.fsNodeCache.GetFsNode(util.FullPath(dir))
+		parent := NodeWithId(util.FullPath(dir).AsInode())
+		if dir == option.FilerMountRootPath {
+			parent = NodeWithId(1)
 		}
-		if parent != nil {
-			if err := wfs.Server.InvalidateEntry(parent, name); err != nil {
-				glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err)
-			}
+		if err := wfs.Server.InvalidateEntry(parent, name); err != nil {
+			glog.V(4).Infof("InvalidateEntry %s : %v", filePath, err)
 		}
 	})
 	startTime := time.Now()
@@ -131,8 +124,7 @@ func NewSeaweedFileSystem(option *Option) *WFS {
 		wfs.metaCache.Shutdown()
 	})
 
-	entry, _ := filer_pb.GetEntry(wfs, util.FullPath(wfs.option.FilerMountRootPath))
-	wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs, entry: entry}
+	wfs.root = &Dir{name: wfs.option.FilerMountRootPath, wfs: wfs, id: 1}
 	wfs.fsNodeCache = newFsCache(wfs.root)
 
 	if wfs.option.ConcurrentWriters > 0 {
@@ -146,30 +138,37 @@ func (wfs *WFS) Root() (fs.Node, error) {
 	return wfs.root, nil
 }
 
-func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32) (fileHandle *FileHandle) {
+func (wfs *WFS) AcquireHandle(file *File, uid, gid uint32, writeOnly bool) (fileHandle *FileHandle) {
 
 	fullpath := file.fullpath()
 	glog.V(4).Infof("AcquireHandle %s uid=%d gid=%d", fullpath, uid, gid)
 
-	wfs.handlesLock.Lock()
-	defer wfs.handlesLock.Unlock()
+	inodeId := file.Id()
 
-	inodeId := file.fullpath().AsInode()
-	if file.isOpen > 0 {
-		existingHandle, found := wfs.handles[inodeId]
-		if found && existingHandle != nil {
-			file.isOpen++
-			return existingHandle
+	wfs.handlesLock.Lock()
+	existingHandle, found := wfs.handles[inodeId]
+	wfs.handlesLock.Unlock()
+	if found && existingHandle != nil {
+		existingHandle.f.isOpen++
+		if existingHandle.writeOnly {
+			existingHandle.writeOnly = writeOnly
 		}
+		glog.V(4).Infof("Acquired Handle %s open %d", fullpath, existingHandle.f.isOpen)
+		return existingHandle
 	}
 
+	entry, _ := file.maybeLoadEntry(context.Background())
+	file.entry = entry
 	fileHandle = newFileHandle(file, uid, gid)
-	file.maybeLoadEntry(context.Background())
+	fileHandle.writeOnly = writeOnly
 	file.isOpen++
 
+	wfs.handlesLock.Lock()
 	wfs.handles[inodeId] = fileHandle
+	wfs.handlesLock.Unlock()
 	fileHandle.handle = inodeId
 
+	glog.V(4).Infof("Acquired new Handle %s open %d", fullpath, file.isOpen)
 	return
 }
 
@@ -177,9 +176,9 @@ func (wfs *WFS) ReleaseHandle(fullpath util.FullPath, handleId fuse.HandleID) {
 	wfs.handlesLock.Lock()
 	defer wfs.handlesLock.Unlock()
 
-	glog.V(4).Infof("%s ReleaseHandle id %d current handles length %d", fullpath, handleId, len(wfs.handles))
+	glog.V(4).Infof("ReleaseHandle %s id %d current handles length %d", fullpath, handleId, len(wfs.handles))
 
-	delete(wfs.handles, fullpath.AsInode())
+	delete(wfs.handles, uint64(handleId))
 
 	return
 }
@@ -269,3 +268,12 @@ func (wfs *WFS) LookupFn() wdclient.LookupFileIdFunctionType {
 	return filer.LookupFn(wfs)
 
 }
+
+type NodeWithId uint64
+
+func (n NodeWithId) Id() uint64 {
+	return uint64(n)
+}
+func (n NodeWithId) Attr(ctx context.Context, attr *fuse.Attr) error {
+	return nil
+}
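
Note on NodeWithId above: it lets the mount invalidate kernel cache entries without consulting the removed fsNodeCache, by wrapping the path-derived inode and handing it to the FUSE server. A hypothetical helper, assuming it sits inside the weed/filesys package so that WFS, util.FullPath and glog are in scope (all appear in the hunks above); it is not part of this patch:

```go
// invalidatePath is illustrative only: wrap the path's inode in NodeWithId and
// ask the FUSE server to drop its cached data for that node.
func (wfs *WFS) invalidatePath(p util.FullPath) {
	fsNode := NodeWithId(p.AsInode())
	if err := wfs.Server.InvalidateNodeData(fsNode); err != nil {
		glog.V(4).Infof("InvalidateNodeData %s: %v", p, err)
	}
}
```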
diff --git a/weed/filesys/wfs_write.go b/weed/filesys/wfs_write.go
index dbec3bebc..730578202 100644
--- a/weed/filesys/wfs_write.go
+++ b/weed/filesys/wfs_write.go
@@ -13,42 +13,43 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
-func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFunctionType {
+func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath, writeOnly bool) filer.SaveDataAsChunkFunctionType {
 
 	return func(reader io.Reader, filename string, offset int64) (chunk *filer_pb.FileChunk, collection, replication string, err error) {
 		var fileId, host string
 		var auth security.EncodedJwt
 
 		if err := wfs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+			return util.Retry("assignVolume", func() error {
+				request := &filer_pb.AssignVolumeRequest{
+					Count:       1,
+					Replication: wfs.option.Replication,
+					Collection:  wfs.option.Collection,
+					TtlSec:      wfs.option.TtlSec,
+					DiskType:    string(wfs.option.DiskType),
+					DataCenter:  wfs.option.DataCenter,
+					Path:        string(fullPath),
+				}
 
-			request := &filer_pb.AssignVolumeRequest{
-				Count:       1,
-				Replication: wfs.option.Replication,
-				Collection:  wfs.option.Collection,
-				TtlSec:      wfs.option.TtlSec,
-				DiskType:    string(wfs.option.DiskType),
-				DataCenter:  wfs.option.DataCenter,
-				Path:        string(fullPath),
-			}
+				resp, err := client.AssignVolume(context.Background(), request)
+				if err != nil {
+					glog.V(0).Infof("assign volume failure %v: %v", request, err)
+					return err
+				}
+				if resp.Error != "" {
+					return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+				}
 
-			resp, err := client.AssignVolume(context.Background(), request)
-			if err != nil {
-				glog.V(0).Infof("assign volume failure %v: %v", request, err)
-				return err
-			}
-			if resp.Error != "" {
-				return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
-			}
+				fileId, auth = resp.FileId, security.EncodedJwt(resp.Auth)
+				loc := &filer_pb.Location{
+					Url:       resp.Url,
+					PublicUrl: resp.PublicUrl,
+				}
+				host = wfs.AdjustedUrl(loc)
+				collection, replication = resp.Collection, resp.Replication
 
-			fileId, auth = resp.FileId, security.EncodedJwt(resp.Auth)
-			loc := &filer_pb.Location{
-				Url:       resp.Url,
-				PublicUrl: resp.PublicUrl,
-			}
-			host = wfs.AdjustedUrl(loc)
-			collection, replication = resp.Collection, resp.Replication
-
-			return nil
+				return nil
+			})
 		}); err != nil {
 			return nil, "", "", fmt.Errorf("filerGrpcAddress assign volume: %v", err)
 		}
@@ -67,7 +68,9 @@ func (wfs *WFS) saveDataAsChunk(fullPath util.FullPath) filer.SaveDataAsChunkFun
 			return nil, "", "", fmt.Errorf("upload result: %v", uploadResult.Error)
 		}
 
-		wfs.chunkCache.SetChunk(fileId, data)
+		if !writeOnly {
+			wfs.chunkCache.SetChunk(fileId, data)
+		}
 
 		chunk = uploadResult.ToPbFileChunk(fileId, offset)
 		return chunk, collection, replication, nil
diff --git a/weed/glog/glog.go b/weed/glog/glog.go
index adb6ab5aa..352a7e185 100644
--- a/weed/glog/glog.go
+++ b/weed/glog/glog.go
@@ -398,7 +398,7 @@ type flushSyncWriter interface {
 func init() {
 	flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
 	flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", true, "log to standard error as well as files")
-	flag.Var(&logging.verbosity, "v", "log level for V logs")
+	flag.Var(&logging.verbosity, "v", "log levels [0|1|2|3|4], default to 0")
 	flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
 	flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
 	flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
diff --git a/weed/iamapi/iamapi_handlers.go b/weed/iamapi/iamapi_handlers.go
new file mode 100644
index 000000000..2e5f709f3
--- /dev/null
+++ b/weed/iamapi/iamapi_handlers.go
@@ -0,0 +1,105 @@
+package iamapi
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"strconv"
+
+	"net/http"
+	"net/url"
+	"time"
+
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+
+	"github.com/aws/aws-sdk-go/service/iam"
+)
+
+type mimeType string
+
+const (
+	mimeNone mimeType = ""
+	mimeXML  mimeType = "application/xml"
+)
+
+func setCommonHeaders(w http.ResponseWriter) {
+	w.Header().Set("x-amz-request-id", fmt.Sprintf("%d", time.Now().UnixNano()))
+	w.Header().Set("Accept-Ranges", "bytes")
+}
+
+// Encodes the response into XML format, prefixed with the XML header.
+func encodeResponse(response interface{}) []byte {
+	var bytesBuffer bytes.Buffer
+	bytesBuffer.WriteString(xml.Header)
+	e := xml.NewEncoder(&bytesBuffer)
+	e.Encode(response)
+	return bytesBuffer.Bytes()
+}
+
+// If none of the HTTP routes match, respond with MethodNotAllowed.
+func notFoundHandler(w http.ResponseWriter, r *http.Request) {
+	glog.V(0).Infof("unsupported %s %s", r.Method, r.RequestURI)
+	writeErrorResponse(w, s3err.ErrMethodNotAllowed, r.URL)
+}
+
+func writeErrorResponse(w http.ResponseWriter, errorCode s3err.ErrorCode, reqURL *url.URL) {
+	apiError := s3err.GetAPIError(errorCode)
+	errorResponse := getRESTErrorResponse(apiError, reqURL.Path)
+	encodedErrorResponse := encodeResponse(errorResponse)
+	writeResponse(w, apiError.HTTPStatusCode, encodedErrorResponse, mimeXML)
+}
+
+func writeIamErrorResponse(w http.ResponseWriter, err error, object string, value string, msg error) {
+	errCode := err.Error()
+	errorResp := ErrorResponse{}
+	errorResp.Error.Type = "Sender"
+	errorResp.Error.Code = &errCode
+	if msg != nil {
+		errMsg := msg.Error()
+		errorResp.Error.Message = &errMsg
+	}
+	glog.Errorf("Response %+v", err)
+	switch errCode {
+	case iam.ErrCodeNoSuchEntityException:
+		msg := fmt.Sprintf("The %s with name %s cannot be found.", object, value)
+		errorResp.Error.Message = &msg
+		writeResponse(w, http.StatusNotFound, encodeResponse(errorResp), mimeXML)
+	case iam.ErrCodeServiceFailureException:
+		writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML)
+	default:
+		writeResponse(w, http.StatusInternalServerError, encodeResponse(errorResp), mimeXML)
+	}
+}
+
+func getRESTErrorResponse(err s3err.APIError, resource string) s3err.RESTErrorResponse {
+	return s3err.RESTErrorResponse{
+		Code:      err.Code,
+		Message:   err.Description,
+		Resource:  resource,
+		RequestID: fmt.Sprintf("%d", time.Now().UnixNano()),
+	}
+}
+
+func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType mimeType) {
+	setCommonHeaders(w)
+	if response != nil {
+		w.Header().Set("Content-Length", strconv.Itoa(len(response)))
+	}
+	if mType != mimeNone {
+		w.Header().Set("Content-Type", string(mType))
+	}
+	w.WriteHeader(statusCode)
+	if response != nil {
+		glog.V(4).Infof("status %d %s: %s", statusCode, mType, string(response))
+		_, err := w.Write(response)
+		if err != nil {
+			glog.V(0).Infof("write err: %v", err)
+		}
+		w.(http.Flusher).Flush()
+	}
+}
+
+func writeSuccessResponseXML(w http.ResponseWriter, response []byte) {
+	writeResponse(w, http.StatusOK, response, mimeXML)
+}
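writeIamErrorResponse above maps AWS IAM error codes onto HTTP statuses. A hypothetical, stand-alone extraction of that mapping, shown only to make the behaviour explicit (iamErrorStatus is not part of the diff):

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/aws/aws-sdk-go/service/iam"
)

// iamErrorStatus is a hypothetical extraction of the switch inside
// writeIamErrorResponse; only NoSuchEntity is surfaced as 404, everything
// else falls back to 500.
func iamErrorStatus(errCode string) int {
	switch errCode {
	case iam.ErrCodeNoSuchEntityException:
		return http.StatusNotFound
	case iam.ErrCodeServiceFailureException:
		return http.StatusInternalServerError
	default:
		return http.StatusInternalServerError
	}
}

func main() {
	fmt.Println(iamErrorStatus(iam.ErrCodeNoSuchEntityException)) // 404
	fmt.Println(iamErrorStatus("SomethingElse"))                  // 500
}
```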
diff --git a/weed/iamapi/iamapi_management_handlers.go b/weed/iamapi/iamapi_management_handlers.go
new file mode 100644
index 000000000..89d283138
--- /dev/null
+++ b/weed/iamapi/iamapi_management_handlers.go
@@ -0,0 +1,453 @@
+package iamapi
+
+import (
+	"crypto/sha1"
+	"encoding/json"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+	"math/rand"
+	"net/http"
+	"net/url"
+	"reflect"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/iam"
+)
+
+const (
+	charsetUpper           = "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+	charset                = charsetUpper + "abcdefghijklmnopqrstuvwxyz/"
+	policyDocumentVersion  = "2012-10-17"
+	StatementActionAdmin   = "*"
+	StatementActionWrite   = "Put*"
+	StatementActionRead    = "Get*"
+	StatementActionList    = "List*"
+	StatementActionTagging = "Tagging*"
+)
+
+var (
+	seededRand *rand.Rand = rand.New(
+		rand.NewSource(time.Now().UnixNano()))
+	policyDocuments = map[string]*PolicyDocument{}
+	policyLock      = sync.RWMutex{}
+)
+
+func MapToStatementAction(action string) string {
+	switch action {
+	case StatementActionAdmin:
+		return s3_constants.ACTION_ADMIN
+	case StatementActionWrite:
+		return s3_constants.ACTION_WRITE
+	case StatementActionRead:
+		return s3_constants.ACTION_READ
+	case StatementActionList:
+		return s3_constants.ACTION_LIST
+	case StatementActionTagging:
+		return s3_constants.ACTION_TAGGING
+	default:
+		return ""
+	}
+}
+
+func MapToIdentitiesAction(action string) string {
+	switch action {
+	case s3_constants.ACTION_ADMIN:
+		return StatementActionAdmin
+	case s3_constants.ACTION_WRITE:
+		return StatementActionWrite
+	case s3_constants.ACTION_READ:
+		return StatementActionRead
+	case s3_constants.ACTION_LIST:
+		return StatementActionList
+	case s3_constants.ACTION_TAGGING:
+		return StatementActionTagging
+	default:
+		return ""
+	}
+}
+
+type Statement struct {
+	Effect   string   `json:"Effect"`
+	Action   []string `json:"Action"`
+	Resource []string `json:"Resource"`
+}
+
+type Policies struct {
+	Policies map[string]PolicyDocument `json:"policies"`
+}
+
+type PolicyDocument struct {
+	Version   string       `json:"Version"`
+	Statement []*Statement `json:"Statement"`
+}
+
+func (p PolicyDocument) String() string {
+	b, _ := json.Marshal(p)
+	return string(b)
+}
+
+func Hash(s *string) string {
+	h := sha1.New()
+	h.Write([]byte(*s))
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func StringWithCharset(length int, charset string) string {
+	b := make([]byte, length)
+	for i := range b {
+		b[i] = charset[seededRand.Intn(len(charset))]
+	}
+	return string(b)
+}
+
+func (iama *IamApiServer) ListUsers(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListUsersResponse) {
+	for _, ident := range s3cfg.Identities {
+		resp.ListUsersResult.Users = append(resp.ListUsersResult.Users, &iam.User{UserName: &ident.Name})
+	}
+	return resp
+}
+
+func (iama *IamApiServer) ListAccessKeys(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp ListAccessKeysResponse) {
+	status := iam.StatusTypeActive
+	userName := values.Get("UserName")
+	for _, ident := range s3cfg.Identities {
+		if userName != "" && userName != ident.Name {
+			continue
+		}
+		for _, cred := range ident.Credentials {
+			resp.ListAccessKeysResult.AccessKeyMetadata = append(resp.ListAccessKeysResult.AccessKeyMetadata,
+				&iam.AccessKeyMetadata{UserName: &ident.Name, AccessKeyId: &cred.AccessKey, Status: &status},
+			)
+		}
+	}
+	return resp
+}
+
+func (iama *IamApiServer) CreateUser(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateUserResponse) {
+	userName := values.Get("UserName")
+	resp.CreateUserResult.User.UserName = &userName
+	s3cfg.Identities = append(s3cfg.Identities, &iam_pb.Identity{Name: userName})
+	return resp
+}
+
+func (iama *IamApiServer) DeleteUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp DeleteUserResponse, err error) {
+	for i, ident := range s3cfg.Identities {
+		if userName == ident.Name {
+			s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...)
+			return resp, nil
+		}
+	}
+	return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
+}
+
+func (iama *IamApiServer) GetUser(s3cfg *iam_pb.S3ApiConfiguration, userName string) (resp GetUserResponse, err error) {
+	for _, ident := range s3cfg.Identities {
+		if userName == ident.Name {
+			resp.GetUserResult.User = iam.User{UserName: &ident.Name}
+			return resp, nil
+		}
+	}
+	return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
+}
+
+func GetPolicyDocument(policy *string) (policyDocument PolicyDocument, err error) {
+	if err = json.Unmarshal([]byte(*policy), &policyDocument); err != nil {
+		return PolicyDocument{}, err
+	}
+	return policyDocument, err
+}
+
+func (iama *IamApiServer) CreatePolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreatePolicyResponse, err error) {
+	policyName := values.Get("PolicyName")
+	policyDocumentString := values.Get("PolicyDocument")
+	policyDocument, err := GetPolicyDocument(&policyDocumentString)
+	if err != nil {
+		return CreatePolicyResponse{}, err
+	}
+	policyId := Hash(&policyDocumentString)
+	arn := fmt.Sprintf("arn:aws:iam:::policy/%s", policyName)
+	resp.CreatePolicyResult.Policy.PolicyName = &policyName
+	resp.CreatePolicyResult.Policy.Arn = &arn
+	resp.CreatePolicyResult.Policy.PolicyId = &policyId
+	policies := Policies{}
+	policyLock.Lock()
+	defer policyLock.Unlock()
+	if err = iama.s3ApiConfig.GetPolicies(&policies); err != nil {
+		return resp, err
+	}
+	policies.Policies[policyName] = policyDocument
+	if err = iama.s3ApiConfig.PutPolicies(&policies); err != nil {
+		return resp, err
+	}
+	return resp, nil
+}
+
+func (iama *IamApiServer) PutUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) {
+	userName := values.Get("UserName")
+	policyName := values.Get("PolicyName")
+	policyDocumentString := values.Get("PolicyDocument")
+	policyDocument, err := GetPolicyDocument(&policyDocumentString)
+	if err != nil {
+		return PutUserPolicyResponse{}, err
+	}
+	policyDocuments[policyName] = &policyDocument
+	actions := GetActions(&policyDocument)
+	for _, ident := range s3cfg.Identities {
+		if userName == ident.Name {
+			for _, action := range actions {
+				ident.Actions = append(ident.Actions, action)
+			}
+			break
+		}
+	}
+	return resp, nil
+}
+
+func (iama *IamApiServer) GetUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp GetUserPolicyResponse, err error) {
+	userName := values.Get("UserName")
+	policyName := values.Get("PolicyName")
+	for _, ident := range s3cfg.Identities {
+		if userName != ident.Name {
+			continue
+		}
+
+		resp.GetUserPolicyResult.UserName = userName
+		resp.GetUserPolicyResult.PolicyName = policyName
+		if len(ident.Actions) == 0 {
+			return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
+		}
+
+		policyDocument := PolicyDocument{Version: policyDocumentVersion}
+		statements := make(map[string][]string)
+		for _, action := range ident.Actions {
+			// parse "Read:EXAMPLE-BUCKET"
+			act := strings.Split(action, ":")
+
+			resource := "*"
+			if len(act) == 2 {
+				resource = fmt.Sprintf("arn:aws:s3:::%s/*", act[1])
+			}
+			statements[resource] = append(statements[resource],
+				fmt.Sprintf("s3:%s", MapToIdentitiesAction(act[0])),
+			)
+		}
+		for resource, actions := range statements {
+			isEqAction := false
+			for i, statement := range policyDocument.Statement {
+				if reflect.DeepEqual(statement.Action, actions) {
+					policyDocument.Statement[i].Resource = append(
+						policyDocument.Statement[i].Resource, resource)
+					isEqAction = true
+					break
+				}
+			}
+			if isEqAction {
+				continue
+			}
+			policyDocumentStatement := Statement{
+				Effect: "Allow",
+				Action: actions,
+			}
+			policyDocumentStatement.Resource = append(policyDocumentStatement.Resource, resource)
+			policyDocument.Statement = append(policyDocument.Statement, &policyDocumentStatement)
+		}
+		resp.GetUserPolicyResult.PolicyDocument = policyDocument.String()
+		return resp, nil
+	}
+	return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
+}
+
+func (iama *IamApiServer) DeleteUserPolicy(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp PutUserPolicyResponse, err error) {
+	userName := values.Get("UserName")
+	for i, ident := range s3cfg.Identities {
+		if ident.Name == userName {
+			s3cfg.Identities = append(s3cfg.Identities[:i], s3cfg.Identities[i+1:]...)
+			return resp, nil
+		}
+	}
+	return resp, fmt.Errorf(iam.ErrCodeNoSuchEntityException)
+}
+
+func GetActions(policy *PolicyDocument) (actions []string) {
+	for _, statement := range policy.Statement {
+		if statement.Effect != "Allow" {
+			continue
+		}
+		for _, resource := range statement.Resource {
+			// Parse "arn:aws:s3:::my-bucket/shared/*"
+			res := strings.Split(resource, ":")
+			if len(res) != 6 || res[0] != "arn" || res[1] != "aws" || res[2] != "s3" {
+				glog.Infof("resource does not match: %s", res)
+				continue
+			}
+			for _, action := range statement.Action {
+				// Parse "s3:Get*"
+				act := strings.Split(action, ":")
+				if len(act) != 2 || act[0] != "s3" {
+					glog.Infof("action does not match: %s", act)
+					continue
+				}
+				statementAction := MapToStatementAction(act[1])
+				if res[5] == "*" {
+					actions = append(actions, statementAction)
+					continue
+				}
+				// Parse my-bucket/shared/*
+				path := strings.Split(res[5], "/")
+				if len(path) != 2 || path[1] != "*" {
+					glog.Infof("bucket does not match: %s", path)
+					continue
+				}
+				actions = append(actions, fmt.Sprintf("%s:%s", statementAction, path[0]))
+			}
+		}
+	}
+	return actions
+}
+
+func (iama *IamApiServer) CreateAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp CreateAccessKeyResponse) {
+	userName := values.Get("UserName")
+	status := iam.StatusTypeActive
+	accessKeyId := StringWithCharset(21, charsetUpper)
+	secretAccessKey := StringWithCharset(42, charset)
+	resp.CreateAccessKeyResult.AccessKey.AccessKeyId = &accessKeyId
+	resp.CreateAccessKeyResult.AccessKey.SecretAccessKey = &secretAccessKey
+	resp.CreateAccessKeyResult.AccessKey.UserName = &userName
+	resp.CreateAccessKeyResult.AccessKey.Status = &status
+	changed := false
+	for _, ident := range s3cfg.Identities {
+		if userName == ident.Name {
+			ident.Credentials = append(ident.Credentials,
+				&iam_pb.Credential{AccessKey: accessKeyId, SecretKey: secretAccessKey})
+			changed = true
+			break
+		}
+	}
+	if !changed {
+		s3cfg.Identities = append(s3cfg.Identities,
+			&iam_pb.Identity{Name: userName,
+				Credentials: []*iam_pb.Credential{
+					{
+						AccessKey: accessKeyId,
+						SecretKey: secretAccessKey,
+					},
+				},
+			},
+		)
+	}
+	return resp
+}
+
+func (iama *IamApiServer) DeleteAccessKey(s3cfg *iam_pb.S3ApiConfiguration, values url.Values) (resp DeleteAccessKeyResponse) {
+	userName := values.Get("UserName")
+	accessKeyId := values.Get("AccessKeyId")
+	for _, ident := range s3cfg.Identities {
+		if userName == ident.Name {
+			for i, cred := range ident.Credentials {
+				if cred.AccessKey == accessKeyId {
+					ident.Credentials = append(ident.Credentials[:i], ident.Credentials[i+1:]...)
+					break
+				}
+			}
+			break
+		}
+	}
+	return resp
+}
+
+func (iama *IamApiServer) DoActions(w http.ResponseWriter, r *http.Request) {
+	if err := r.ParseForm(); err != nil {
+		writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
+		return
+	}
+	values := r.PostForm
+	var s3cfgLock sync.RWMutex
+	s3cfgLock.RLock()
+	s3cfg := &iam_pb.S3ApiConfiguration{}
+	if err := iama.s3ApiConfig.GetS3ApiConfiguration(s3cfg); err != nil {
+		writeErrorResponse(w, s3err.ErrInternalError, r.URL)
+		return
+	}
+	s3cfgLock.RUnlock()
+
+	glog.V(4).Infof("DoActions: %+v", values)
+	var response interface{}
+	var err error
+	changed := true
+	switch r.Form.Get("Action") {
+	case "ListUsers":
+		response = iama.ListUsers(s3cfg, values)
+		changed = false
+	case "ListAccessKeys":
+		response = iama.ListAccessKeys(s3cfg, values)
+		changed = false
+	case "CreateUser":
+		response = iama.CreateUser(s3cfg, values)
+	case "GetUser":
+		userName := values.Get("UserName")
+		response, err = iama.GetUser(s3cfg, userName)
+		if err != nil {
+			writeIamErrorResponse(w, err, "user", userName, nil)
+			return
+		}
+		changed = false
+	case "DeleteUser":
+		userName := values.Get("UserName")
+		response, err = iama.DeleteUser(s3cfg, userName)
+		if err != nil {
+			writeIamErrorResponse(w, err, "user", userName, nil)
+			return
+		}
+	case "CreateAccessKey":
+		response = iama.CreateAccessKey(s3cfg, values)
+	case "DeleteAccessKey":
+		response = iama.DeleteAccessKey(s3cfg, values)
+	case "CreatePolicy":
+		response, err = iama.CreatePolicy(s3cfg, values)
+		if err != nil {
+			glog.Errorf("CreatePolicy:  %+v", err)
+			writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
+			return
+		}
+	case "PutUserPolicy":
+		response, err = iama.PutUserPolicy(s3cfg, values)
+		if err != nil {
+			glog.Errorf("PutUserPolicy:  %+v", err)
+			writeErrorResponse(w, s3err.ErrInvalidRequest, r.URL)
+			return
+		}
+	case "GetUserPolicy":
+		response, err = iama.GetUserPolicy(s3cfg, values)
+		if err != nil {
+			writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
+			return
+		}
+		changed = false
+	case "DeleteUserPolicy":
+		if response, err = iama.DeleteUserPolicy(s3cfg, values); err != nil {
+			writeIamErrorResponse(w, err, "user", values.Get("UserName"), nil)
+		}
+	default:
+		errNotImplemented := s3err.GetAPIError(s3err.ErrNotImplemented)
+		errorResponse := ErrorResponse{}
+		errorResponse.Error.Code = &errNotImplemented.Code
+		errorResponse.Error.Message = &errNotImplemented.Description
+		writeResponse(w, errNotImplemented.HTTPStatusCode, encodeResponse(errorResponse), mimeXML)
+		return
+	}
+	if changed {
+		s3cfgLock.Lock()
+		err := iama.s3ApiConfig.PutS3ApiConfiguration(s3cfg)
+		s3cfgLock.Unlock()
+		if err != nil {
+			writeIamErrorResponse(w, fmt.Errorf(iam.ErrCodeServiceFailureException), "", "", err)
+			return
+		}
+	}
+	writeSuccessResponseXML(w, encodeResponse(response))
+}
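GetActions above translates an AWS-style policy document into SeaweedFS identity actions such as "Read:bucket". A hedged sketch of how a caller might exercise it; with the mapping in the diff, the sample policy should yield roughly "Read:example-bucket" and "List:example-bucket":

```go
package main

import (
	"fmt"

	"github.com/chrislusf/seaweedfs/weed/iamapi"
)

func main() {
	doc := `{
	  "Version": "2012-10-17",
	  "Statement": [
	    {
	      "Effect": "Allow",
	      "Action": ["s3:Get*", "s3:List*"],
	      "Resource": ["arn:aws:s3:::example-bucket/*"]
	    }
	  ]
	}`
	policy, err := iamapi.GetPolicyDocument(&doc)
	if err != nil {
		panic(err)
	}
	// Expected to print identity actions in the form "<Action>:<bucket>",
	// here roughly [Read:example-bucket List:example-bucket].
	fmt.Println(iamapi.GetActions(&policy))
}
```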
diff --git a/weed/iamapi/iamapi_response.go b/weed/iamapi/iamapi_response.go
new file mode 100644
index 000000000..77328b608
--- /dev/null
+++ b/weed/iamapi/iamapi_response.go
@@ -0,0 +1,103 @@
+package iamapi
+
+import (
+	"encoding/xml"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/service/iam"
+)
+
+type CommonResponse struct {
+	ResponseMetadata struct {
+		RequestId string `xml:"RequestId"`
+	} `xml:"ResponseMetadata"`
+}
+
+type ListUsersResponse struct {
+	CommonResponse
+	XMLName         xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListUsersResponse"`
+	ListUsersResult struct {
+		Users       []*iam.User `xml:"Users>member"`
+		IsTruncated bool        `xml:"IsTruncated"`
+	} `xml:"ListUsersResult"`
+}
+
+type ListAccessKeysResponse struct {
+	CommonResponse
+	XMLName              xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ListAccessKeysResponse"`
+	ListAccessKeysResult struct {
+		AccessKeyMetadata []*iam.AccessKeyMetadata `xml:"AccessKeyMetadata>member"`
+		IsTruncated       bool                     `xml:"IsTruncated"`
+	} `xml:"ListAccessKeysResult"`
+}
+
+type DeleteAccessKeyResponse struct {
+	CommonResponse
+	XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteAccessKeyResponse"`
+}
+
+type CreatePolicyResponse struct {
+	CommonResponse
+	XMLName            xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreatePolicyResponse"`
+	CreatePolicyResult struct {
+		Policy iam.Policy `xml:"Policy"`
+	} `xml:"CreatePolicyResult"`
+}
+
+type CreateUserResponse struct {
+	CommonResponse
+	XMLName          xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateUserResponse"`
+	CreateUserResult struct {
+		User iam.User `xml:"User"`
+	} `xml:"CreateUserResult"`
+}
+
+type DeleteUserResponse struct {
+	CommonResponse
+	XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ DeleteUserResponse"`
+}
+
+type GetUserResponse struct {
+	CommonResponse
+	XMLName       xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserResponse"`
+	GetUserResult struct {
+		User iam.User `xml:"User"`
+	} `xml:"GetUserResult"`
+}
+
+type CreateAccessKeyResponse struct {
+	CommonResponse
+	XMLName               xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ CreateAccessKeyResponse"`
+	CreateAccessKeyResult struct {
+		AccessKey iam.AccessKey `xml:"AccessKey"`
+	} `xml:"CreateAccessKeyResult"`
+}
+
+type PutUserPolicyResponse struct {
+	CommonResponse
+	XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ PutUserPolicyResponse"`
+}
+
+type GetUserPolicyResponse struct {
+	CommonResponse
+	XMLName             xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ GetUserPolicyResponse"`
+	GetUserPolicyResult struct {
+		UserName       string `xml:"UserName"`
+		PolicyName     string `xml:"PolicyName"`
+		PolicyDocument string `xml:"PolicyDocument"`
+	} `xml:"GetUserPolicyResult"`
+}
+
+type ErrorResponse struct {
+	CommonResponse
+	XMLName xml.Name `xml:"https://iam.amazonaws.com/doc/2010-05-08/ ErrorResponse"`
+	Error   struct {
+		iam.ErrorDetails
+		Type string `xml:"Type"`
+	} `xml:"Error"`
+}
+
+func (r *CommonResponse) SetRequestId() {
+	r.ResponseMetadata.RequestId = fmt.Sprintf("%d", time.Now().UnixNano())
+}
diff --git a/weed/iamapi/iamapi_server.go b/weed/iamapi/iamapi_server.go
new file mode 100644
index 000000000..18af1a919
--- /dev/null
+++ b/weed/iamapi/iamapi_server.go
@@ -0,0 +1,149 @@
+package iamapi
+
+// https://docs.aws.amazon.com/cli/latest/reference/iam/list-roles.html
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/filer"
+	"github.com/chrislusf/seaweedfs/weed/pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
+	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+	"github.com/chrislusf/seaweedfs/weed/s3api"
+	. "github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
+	"github.com/chrislusf/seaweedfs/weed/wdclient"
+	"github.com/gorilla/mux"
+	"google.golang.org/grpc"
+	"net/http"
+	"strings"
+)
+
+type IamS3ApiConfig interface {
+	GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error)
+	PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error)
+	GetPolicies(policies *Policies) (err error)
+	PutPolicies(policies *Policies) (err error)
+}
+
+type IamS3ApiConfigure struct {
+	option       *IamServerOption
+	masterClient *wdclient.MasterClient
+}
+
+type IamServerOption struct {
+	Masters          string
+	Filer            string
+	Port             int
+	FilerGrpcAddress string
+	GrpcDialOption   grpc.DialOption
+}
+
+type IamApiServer struct {
+	s3ApiConfig IamS3ApiConfig
+	iam         *s3api.IdentityAccessManagement
+}
+
+var s3ApiConfigure IamS3ApiConfig
+
+func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) {
+	s3ApiConfigure = IamS3ApiConfigure{
+		option:       option,
+		masterClient: wdclient.NewMasterClient(option.GrpcDialOption, pb.AdminShellClient, "", 0, "", strings.Split(option.Masters, ",")),
+	}
+	s3Option := s3api.S3ApiServerOption{Filer: option.Filer}
+	iamApiServer = &IamApiServer{
+		s3ApiConfig: s3ApiConfigure,
+		iam:         s3api.NewIdentityAccessManagement(&s3Option),
+	}
+
+	iamApiServer.registerRouter(router)
+
+	return iamApiServer, nil
+}
+
+func (iama *IamApiServer) registerRouter(router *mux.Router) {
+	// API Router
+	apiRouter := router.PathPrefix("/").Subrouter()
+	// ListBuckets
+
+	// apiRouter.Methods("GET").Path("/").HandlerFunc(track(s3a.iam.Auth(s3a.ListBucketsHandler, ACTION_ADMIN), "LIST"))
+	apiRouter.Methods("POST").Path("/").HandlerFunc(iama.iam.Auth(iama.DoActions, ACTION_ADMIN))
+	//
+	// NotFound
+	apiRouter.NotFoundHandler = http.HandlerFunc(notFoundHandler)
+}
+
+func (iam IamS3ApiConfigure) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
+	var buf bytes.Buffer
+	err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamIdentityFile, &buf); err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	if buf.Len() > 0 {
+		if err = filer.ParseS3ConfigurationFromBytes(buf.Bytes(), s3cfg); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (iam IamS3ApiConfigure) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
+	buf := bytes.Buffer{}
+	if err := filer.S3ConfigurationToText(&buf, s3cfg); err != nil {
+		return fmt.Errorf("S3ConfigurationToText: %s", err)
+	}
+	return pb.WithGrpcFilerClient(
+		iam.option.FilerGrpcAddress,
+		iam.option.GrpcDialOption,
+		func(client filer_pb.SeaweedFilerClient) error {
+			if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamIdentityFile, buf.Bytes()); err != nil {
+				return err
+			}
+			return nil
+		},
+	)
+}
+
+func (iam IamS3ApiConfigure) GetPolicies(policies *Policies) (err error) {
+	var buf bytes.Buffer
+	err = pb.WithGrpcFilerClient(iam.option.FilerGrpcAddress, iam.option.GrpcDialOption, func(client filer_pb.SeaweedFilerClient) error {
+		if err = filer.ReadEntry(iam.masterClient, client, filer.IamConfigDirecotry, filer.IamPoliciesFile, &buf); err != nil {
+			return err
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+	if buf.Len() == 0 {
+		policies.Policies = make(map[string]PolicyDocument)
+		return nil
+	}
+	if err := json.Unmarshal(buf.Bytes(), policies); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (iam IamS3ApiConfigure) PutPolicies(policies *Policies) (err error) {
+	var b []byte
+	if b, err = json.Marshal(policies); err != nil {
+		return err
+	}
+	return pb.WithGrpcFilerClient(
+		iam.option.FilerGrpcAddress,
+		iam.option.GrpcDialOption,
+		func(client filer_pb.SeaweedFilerClient) error {
+			if err := filer.SaveInsideFiler(client, filer.IamConfigDirecotry, filer.IamPoliciesFile, b); err != nil {
+				return err
+			}
+			return nil
+		},
+	)
+}
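The server registers a single POST / route that accepts form-encoded IAM actions, so the stock AWS SDK can talk to it. A sketch using aws-sdk-go; the endpoint address and the admin credentials below are assumptions, not values from the diff:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/iam"
)

func main() {
	// Assumptions: the IAM endpoint listens on localhost:8111 and the static
	// credentials below belong to an admin identity in the S3 configuration.
	sess := session.Must(session.NewSession(&aws.Config{
		Region:      aws.String("us-east-1"),
		Endpoint:    aws.String("http://localhost:8111"),
		Credentials: credentials.NewStaticCredentials("admin_access_key", "admin_secret_key", ""),
	}))

	svc := iam.New(sess)
	out, err := svc.CreateUser(&iam.CreateUserInput{UserName: aws.String("alice")})
	if err != nil {
		panic(err)
	}
	fmt.Println(out.User)
}
```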
diff --git a/weed/iamapi/iamapi_test.go b/weed/iamapi/iamapi_test.go
new file mode 100644
index 000000000..09aaf0ac8
--- /dev/null
+++ b/weed/iamapi/iamapi_test.go
@@ -0,0 +1,181 @@
+package iamapi
+
+import (
+	"encoding/xml"
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+	"github.com/aws/aws-sdk-go/service/iam"
+	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
+	"github.com/gorilla/mux"
+	"github.com/jinzhu/copier"
+	"github.com/stretchr/testify/assert"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+)
+
+var GetS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error)
+var PutS3ApiConfiguration func(s3cfg *iam_pb.S3ApiConfiguration) (err error)
+var GetPolicies func(policies *Policies) (err error)
+var PutPolicies func(policies *Policies) (err error)
+
+var s3config = iam_pb.S3ApiConfiguration{}
+var policiesFile = Policies{Policies: make(map[string]PolicyDocument)}
+var ias = IamApiServer{s3ApiConfig: iamS3ApiConfigureMock{}}
+
+type iamS3ApiConfigureMock struct{}
+
+func (iam iamS3ApiConfigureMock) GetS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
+	_ = copier.Copy(&s3cfg.Identities, &s3config.Identities)
+	return nil
+}
+
+func (iam iamS3ApiConfigureMock) PutS3ApiConfiguration(s3cfg *iam_pb.S3ApiConfiguration) (err error) {
+	_ = copier.Copy(&s3config.Identities, &s3cfg.Identities)
+	return nil
+}
+
+func (iam iamS3ApiConfigureMock) GetPolicies(policies *Policies) (err error) {
+	_ = copier.Copy(&policies, &policiesFile)
+	return nil
+}
+
+func (iam iamS3ApiConfigureMock) PutPolicies(policies *Policies) (err error) {
+	_ = copier.Copy(&policiesFile, &policies)
+	return nil
+}
+
+func TestCreateUser(t *testing.T) {
+	userName := aws.String("Test")
+	params := &iam.CreateUserInput{UserName: userName}
+	req, _ := iam.New(session.New()).CreateUserRequest(params)
+	_ = req.Build()
+	out := CreateUserResponse{}
+	response, err := executeRequest(req.HTTPRequest, out)
+	assert.Equal(t, nil, err)
+	assert.Equal(t, http.StatusOK, response.Code)
+	//assert.Equal(t, out.XMLName, "lol")
+}
+
+func TestListUsers(t *testing.T) {
+	params := &iam.ListUsersInput{}
+	req, _ := iam.New(session.New()).ListUsersRequest(params)
+	_ = req.Build()
+	out := ListUsersResponse{}
+	response, err := executeRequest(req.HTTPRequest, out)
+	assert.Equal(t, nil, err)
+	assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func TestListAccessKeys(t *testing.T) {
+	svc := iam.New(session.New())
+	params := &iam.ListAccessKeysInput{}
+	req, _ := svc.ListAccessKeysRequest(params)
+	_ = req.Build()
+	out := ListAccessKeysResponse{}
+	response, err := executeRequest(req.HTTPRequest, out)
+	assert.Equal(t, nil, err)
+	assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func TestGetUser(t *testing.T) {
+	userName := aws.String("Test")
+	params := &iam.GetUserInput{UserName: userName}
+	req, _ := iam.New(session.New()).GetUserRequest(params)
+	_ = req.Build()
+	out := GetUserResponse{}
+	response, err := executeRequest(req.HTTPRequest, out)
+	assert.Equal(t, nil, err)
+	assert.Equal(t, http.StatusOK, response.Code)
+}
+
+// TODO: flatten statements that share the same action set
+func TestCreatePolicy(t *testing.T) {
+	params := &iam.CreatePolicyInput{
+		PolicyName: aws.String("S3-read-only-example-bucket"),
+		PolicyDocument: aws.String(`
+			{
+			  "Version": "2012-10-17",
+			  "Statement": [
+				{
+				  "Effect": "Allow",
+				  "Action": [
+					"s3:Get*",
+					"s3:List*"
+				  ],
+				  "Resource": [
+					"arn:aws:s3:::EXAMPLE-BUCKET",
+					"arn:aws:s3:::EXAMPLE-BUCKET/*"
+				  ]
+				}
+			  ]
+			}`),
+	}
+	req, _ := iam.New(session.New()).CreatePolicyRequest(params)
+	_ = req.Build()
+	out := CreatePolicyResponse{}
+	response, err := executeRequest(req.HTTPRequest, out)
+	assert.Equal(t, nil, err)
+	assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func TestPutUserPolicy(t *testing.T) {
+	userName := aws.String("Test")
+	params := &iam.PutUserPolicyInput{
+		UserName:   userName,
+		PolicyName: aws.String("S3-read-only-example-bucket"),
+		PolicyDocument: aws.String(
+			`{
+				  "Version": "2012-10-17",
+				  "Statement": [
+					{
+					  "Effect": "Allow",
+					  "Action": [
+						"s3:Get*",
+						"s3:List*"
+					  ],
+					  "Resource": [
+						"arn:aws:s3:::EXAMPLE-BUCKET",
+						"arn:aws:s3:::EXAMPLE-BUCKET/*"
+					  ]
+					}
+				  ]
+			}`),
+	}
+	req, _ := iam.New(session.New()).PutUserPolicyRequest(params)
+	_ = req.Build()
+	out := PutUserPolicyResponse{}
+	response, err := executeRequest(req.HTTPRequest, out)
+	assert.Equal(t, nil, err)
+	assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func TestGetUserPolicy(t *testing.T) {
+	userName := aws.String("Test")
+	params := &iam.GetUserPolicyInput{UserName: userName, PolicyName: aws.String("S3-read-only-example-bucket")}
+	req, _ := iam.New(session.New()).GetUserPolicyRequest(params)
+	_ = req.Build()
+	out := GetUserPolicyResponse{}
+	response, err := executeRequest(req.HTTPRequest, out)
+	assert.Equal(t, nil, err)
+	assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func TestDeleteUser(t *testing.T) {
+	userName := aws.String("Test")
+	params := &iam.DeleteUserInput{UserName: userName}
+	req, _ := iam.New(session.New()).DeleteUserRequest(params)
+	_ = req.Build()
+	out := DeleteUserResponse{}
+	response, err := executeRequest(req.HTTPRequest, out)
+	assert.Equal(t, nil, err)
+	assert.Equal(t, http.StatusOK, response.Code)
+}
+
+func executeRequest(req *http.Request, v interface{}) (*httptest.ResponseRecorder, error) {
+	rr := httptest.NewRecorder()
+	apiRouter := mux.NewRouter().SkipClean(true)
+	apiRouter.Path("/").Methods("POST").HandlerFunc(ias.DoActions)
+	apiRouter.ServeHTTP(rr, req)
+	return rr, xml.Unmarshal(rr.Body.Bytes(), &v)
+}
diff --git a/weed/messaging/broker/broker_append.go b/weed/messaging/broker/broker_append.go
index 8e5b56fd0..40c807164 100644
--- a/weed/messaging/broker/broker_append.go
+++ b/weed/messaging/broker/broker_append.go
@@ -3,6 +3,7 @@ package broker
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/security"
 	"io"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
@@ -10,7 +11,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/filer_pb"
 	"github.com/chrislusf/seaweedfs/weed/pb/messaging_pb"
-	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
@@ -53,26 +53,33 @@ func (broker *MessageBroker) assignAndUpload(topicConfig *messaging_pb.TopicConf
 	// assign a volume location
 	if err := broker.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
 
-		request := &filer_pb.AssignVolumeRequest{
-			Count:       1,
-			Replication: topicConfig.Replication,
-			Collection:  topicConfig.Collection,
-		}
+		assignErr := util.Retry("assignVolume", func() error {
+			request := &filer_pb.AssignVolumeRequest{
+				Count:       1,
+				Replication: topicConfig.Replication,
+				Collection:  topicConfig.Collection,
+			}
 
-		resp, err := client.AssignVolume(context.Background(), request)
-		if err != nil {
-			glog.V(0).Infof("assign volume failure %v: %v", request, err)
-			return err
-		}
-		if resp.Error != "" {
-			return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
-		}
+			resp, err := client.AssignVolume(context.Background(), request)
+			if err != nil {
+				glog.V(0).Infof("assign volume failure %v: %v", request, err)
+				return err
+			}
+			if resp.Error != "" {
+				return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+			}
 
-		assignResult.Auth = security.EncodedJwt(resp.Auth)
-		assignResult.Fid = resp.FileId
-		assignResult.Url = resp.Url
-		assignResult.PublicUrl = resp.PublicUrl
-		assignResult.Count = uint64(resp.Count)
+			assignResult.Auth = security.EncodedJwt(resp.Auth)
+			assignResult.Fid = resp.FileId
+			assignResult.Url = resp.Url
+			assignResult.PublicUrl = resp.PublicUrl
+			assignResult.Count = uint64(resp.Count)
+
+			return nil
+		})
+		if assignErr != nil {
+			return assignErr
+		}
 
 		return nil
 	}); err != nil {
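This hunk (and the filer-sink change further below) wraps volume assignment in util.Retry so transient assignment failures are retried. A simplified, self-contained sketch of the retry-wrapper pattern; the real util.Retry lives in weed/util and has its own attempt count and backoff, so the helper below is only illustrative:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// retry is a simplified stand-in for util.Retry("assignVolume", job) as used
// in the hunk above.
func retry(name string, attempts int, wait time.Duration, job func() error) (err error) {
	for i := 0; i < attempts; i++ {
		if err = job(); err == nil {
			return nil
		}
		fmt.Printf("%s attempt %d failed: %v\n", name, i+1, err)
		time.Sleep(wait)
	}
	return err
}

func main() {
	calls := 0
	err := retry("assignVolume", 3, 10*time.Millisecond, func() error {
		calls++
		if calls < 2 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(err, calls) // <nil> 2
}
```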
diff --git a/weed/operation/assign_file_id.go b/weed/operation/assign_file_id.go
index cc1359961..ffd3e4938 100644
--- a/weed/operation/assign_file_id.go
+++ b/weed/operation/assign_file_id.go
@@ -86,6 +86,7 @@ func Assign(masterFn GetMasterFn, grpcDialOption grpc.DialOption, primaryRequest
 			continue
 		}
 
+		break
 	}
 
 	return ret, lastError
diff --git a/weed/operation/upload_content.go b/weed/operation/upload_content.go
index e891ae03b..8e7c6f733 100644
--- a/weed/operation/upload_content.go
+++ b/weed/operation/upload_content.go
@@ -11,7 +11,6 @@ import (
 	"net/http"
 	"net/textproto"
 	"path/filepath"
-	"runtime/debug"
 	"strings"
 	"time"
 
@@ -40,7 +39,7 @@ func (uploadResult *UploadResult) ToPbFileChunk(fileId string, offset int64) *fi
 		Offset:       offset,
 		Size:         uint64(uploadResult.Size),
 		Mtime:        time.Now().UnixNano(),
-		ETag:         uploadResult.ETag,
+		ETag:         uploadResult.ContentMd5,
 		CipherKey:    uploadResult.CipherKey,
 		IsCompressed: uploadResult.Gzip > 0,
 		Fid:          fid,
@@ -235,8 +234,12 @@ func upload_content(uploadUrl string, fillBufferFunction func(w io.Writer) error
 	// print("+")
 	resp, post_err := HttpClient.Do(req)
 	if post_err != nil {
-		glog.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
-		debug.PrintStack()
+		if strings.Contains(post_err.Error(), "connection reset by peer") ||
+			strings.Contains(post_err.Error(), "use of closed network connection") {
+			resp, post_err = HttpClient.Do(req)
+		}
+	}
+	if post_err != nil {
 		return nil, fmt.Errorf("upload %s %d bytes to %v: %v", filename, originalDataSize, uploadUrl, post_err)
 	}
 	// print("-")
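The change retries HttpClient.Do once when the first attempt dies with a reset or closed connection. One caveat worth keeping in mind: replaying the same *http.Request only sends the full payload if the body can be rebuilt. A hedged sketch of a replay-safe variant (postWithRetry is hypothetical, not the code in the diff):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strings"
)

// postWithRetry rebuilds the request from the original payload before
// retrying, so the second attempt always carries the full body even if the
// first one died mid-transfer.
func postWithRetry(client *http.Client, url, contentType string, payload []byte) (*http.Response, error) {
	do := func() (*http.Response, error) {
		req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(payload))
		if err != nil {
			return nil, err
		}
		req.Header.Set("Content-Type", contentType)
		return client.Do(req)
	}
	resp, err := do()
	if err != nil && (strings.Contains(err.Error(), "connection reset by peer") ||
		strings.Contains(err.Error(), "use of closed network connection")) {
		resp, err = do()
	}
	return resp, err
}

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		b, _ := io.ReadAll(r.Body)
		fmt.Fprintf(w, "got %d bytes", len(b))
	}))
	defer srv.Close()

	resp, err := postWithRetry(srv.Client(), srv.URL, "application/octet-stream", []byte("chunk data"))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body)) // got 10 bytes
}
```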
diff --git a/weed/pb/master.proto b/weed/pb/master.proto
index 6a1758ccc..cdb49d1e3 100644
--- a/weed/pb/master.proto
+++ b/weed/pb/master.proto
@@ -288,6 +288,7 @@ message LeaseAdminTokenRequest {
     int64 previous_token = 1;
     int64 previous_lock_time = 2;
     string lock_name = 3;
+    string client_name = 4;
 }
 message LeaseAdminTokenResponse {
     int64 token = 1;
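The new client_name field lets a shell client identify itself when leasing the exclusive admin lock. A hedged sketch of issuing that request through the generated master_pb client; the master address, lock name, and client name below are assumptions:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
	"google.golang.org/grpc"
)

func main() {
	// Assumption: a master's gRPC port is reachable on localhost:19333
	// (the HTTP port plus 10000 by default).
	conn, err := grpc.Dial("localhost:19333", grpc.WithInsecure())
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := master_pb.NewSeaweedClient(conn)
	resp, err := client.LeaseAdminToken(context.Background(), &master_pb.LeaseAdminTokenRequest{
		PreviousToken:    0,
		PreviousLockTime: 0,
		LockName:         "admin",                                   // assumed lock name
		ClientName:       fmt.Sprintf("shell-%d", time.Now().Unix()), // the new field
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Token, resp.LockTsNs)
}
```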
diff --git a/weed/pb/master_pb/master.pb.go b/weed/pb/master_pb/master.pb.go
index 7e1f282dd..29d8499f8 100644
--- a/weed/pb/master_pb/master.pb.go
+++ b/weed/pb/master_pb/master.pb.go
@@ -2468,6 +2468,7 @@ type LeaseAdminTokenRequest struct {
 	PreviousToken    int64  `protobuf:"varint,1,opt,name=previous_token,json=previousToken,proto3" json:"previous_token,omitempty"`
 	PreviousLockTime int64  `protobuf:"varint,2,opt,name=previous_lock_time,json=previousLockTime,proto3" json:"previous_lock_time,omitempty"`
 	LockName         string `protobuf:"bytes,3,opt,name=lock_name,json=lockName,proto3" json:"lock_name,omitempty"`
+	ClientName       string `protobuf:"bytes,4,opt,name=client_name,json=clientName,proto3" json:"client_name,omitempty"`
 }
 
 func (x *LeaseAdminTokenRequest) Reset() {
@@ -2523,6 +2524,13 @@ func (x *LeaseAdminTokenRequest) GetLockName() string {
 	return ""
 }
 
+func (x *LeaseAdminTokenRequest) GetClientName() string {
+	if x != nil {
+		return x.ClientName
+	}
+	return ""
+}
+
 type LeaseAdminTokenResponse struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
@@ -3268,7 +3276,7 @@ var file_master_proto_rawDesc = []byte{
 	0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52,
 	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x72, 0x70, 0x63, 0x5f,
 	0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
-	0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0x8a,
+	0x0d, 0x67, 0x72, 0x70, 0x63, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x65, 0x73, 0x22, 0xab,
 	0x01, 0x0a, 0x16, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b,
 	0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65,
 	0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28,
@@ -3277,103 +3285,105 @@ var file_master_proto_rawDesc = []byte{
 	0x6b, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72,
 	0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b,
 	0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
-	0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x17, 0x4c,
-	0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65,
-	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
-	0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x0a,
-	0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03,
-	0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18, 0x52,
-	0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e,
-	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76, 0x69,
-	0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52,
-	0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2c,
-	0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b, 0x5f,
-	0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65, 0x76,
-	0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a, 0x09,
-	0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
-	0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65, 0x6c,
-	0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65,
-	0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xca, 0x09, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77, 0x65,
-	0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62,
-	0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
-	0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74,
-	0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x52,
-	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x51, 0x0a,
-	0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x1f,
-	0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70, 0x43,
-	0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
-	0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75,
-	0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01,
-	0x12, 0x51, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
-	0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f,
-	0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
-	0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f,
-	0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-	0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x18, 0x2e,
-	0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e,
-	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
-	0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
-	0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69,
-	0x63, 0x73, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53,
-	0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
-	0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74, 0x61,
-	0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
-	0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c,
-	0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
-	0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
-	0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70,
-	0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74,
-	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43, 0x6f,
-	0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x22,
-	0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65,
-	0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
-	0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43,
-	0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52,
-	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x56, 0x6f, 0x6c,
-	0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
-	0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65,
-	0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70,
-	0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70,
-	0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70,
-	0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
-	0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c,
-	0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73,
-	0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56,
-	0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12,
-	0x51, 0x0a, 0x0c, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12,
-	0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75,
-	0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
-	0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63, 0x75,
-	0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
-	0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43,
-	0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x6d,
-	0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74,
-	0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52,
-	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
-	0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66,
-	0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-	0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65,
-	0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
-	0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43,
-	0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e,
-	0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61,
-	0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
-	0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64,
-	0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
-	0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54,
-	0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d, 0x61,
-	0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d,
-	0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
-	0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69,
-	0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
-	0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54,
-	0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61,
-	0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41,
-	0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
-	0x65, 0x22, 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f,
-	0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61, 0x77,
-	0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d, 0x61,
-	0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+	0x09, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63,
+	0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x0a, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x4d, 0x0a, 0x17,
+	0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52,
+	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e,
+	0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a,
+	0x0a, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x74, 0x73, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
+	0x03, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x54, 0x73, 0x4e, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x18,
+	0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65,
+	0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x65, 0x76,
+	0x69, 0x6f, 0x75, 0x73, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
+	0x52, 0x0d, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12,
+	0x2c, 0x0a, 0x12, 0x70, 0x72, 0x65, 0x76, 0x69, 0x6f, 0x75, 0x73, 0x5f, 0x6c, 0x6f, 0x63, 0x6b,
+	0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x10, 0x70, 0x72, 0x65,
+	0x76, 0x69, 0x6f, 0x75, 0x73, 0x4c, 0x6f, 0x63, 0x6b, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1b, 0x0a,
+	0x09, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
+	0x52, 0x08, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x1b, 0x0a, 0x19, 0x52, 0x65,
+	0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52,
+	0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xca, 0x09, 0x0a, 0x07, 0x53, 0x65, 0x61, 0x77,
+	0x65, 0x65, 0x64, 0x12, 0x49, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x72, 0x74,
+	0x62, 0x65, 0x61, 0x74, 0x12, 0x14, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62,
+	0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x1a, 0x1c, 0x2e, 0x6d, 0x61, 0x73,
+	0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x48, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x51,
+	0x0a, 0x0d, 0x4b, 0x65, 0x65, 0x70, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12,
+	0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4b, 0x65, 0x65, 0x70,
+	0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+	0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c,
+	0x75, 0x6d, 0x65, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x00, 0x28, 0x01, 0x30,
+	0x01, 0x12, 0x51, 0x0a, 0x0c, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d,
+	0x65, 0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
+	0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+	0x74, 0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
+	0x6f, 0x6b, 0x75, 0x70, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+	0x73, 0x65, 0x22, 0x00, 0x12, 0x3f, 0x0a, 0x06, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x12, 0x18,
+	0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67,
+	0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
+	0x72, 0x5f, 0x70, 0x62, 0x2e, 0x41, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f,
+	0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74,
+	0x69, 0x63, 0x73, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+	0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+	0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x53, 0x74,
+	0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+	0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+	0x4c, 0x69, 0x73, 0x74, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62,
+	0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73, 0x74, 0x52,
+	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
+	0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x73,
+	0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5d, 0x0a, 0x10, 0x43,
+	0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12,
+	0x22, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x43, 0x6f, 0x6c, 0x6c,
+	0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
+	0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e,
+	0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65,
+	0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0a, 0x56, 0x6f,
+	0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x1c, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65,
+	0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52,
+	0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f,
+	0x70, 0x62, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73,
+	0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x57, 0x0a, 0x0e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75,
+	0x70, 0x45, 0x63, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x73, 0x74,
+	0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63, 0x56, 0x6f,
+	0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61,
+	0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x45, 0x63,
+	0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00,
+	0x12, 0x51, 0x0a, 0x0c, 0x56, 0x61, 0x63, 0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65,
+	0x12, 0x1e, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63,
+	0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+	0x1a, 0x1f, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x56, 0x61, 0x63,
+	0x75, 0x75, 0x6d, 0x56, 0x6f, 0x6c, 0x75, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+	0x65, 0x22, 0x00, 0x12, 0x6f, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72,
+	0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e,
+	0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73,
+	0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
+	0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
+	0x5f, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x6e,
+	0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+	0x73, 0x65, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74,
+	0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74,
+	0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72,
+	0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24,
+	0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4d,
+	0x61, 0x73, 0x74, 0x65, 0x72, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70,
+	0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x5a, 0x0a, 0x0f, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41,
+	0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x21, 0x2e, 0x6d, 0x61, 0x73, 0x74,
+	0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e,
+	0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x6d,
+	0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x4c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64,
+	0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
+	0x22, 0x00, 0x12, 0x60, 0x0a, 0x11, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d,
+	0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72,
+	0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e,
+	0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d,
+	0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65,
+	0x41, 0x64, 0x6d, 0x69, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+	0x73, 0x65, 0x22, 0x00, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
+	0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x72, 0x69, 0x73, 0x6c, 0x75, 0x73, 0x66, 0x2f, 0x73, 0x65, 0x61,
+	0x77, 0x65, 0x65, 0x64, 0x66, 0x73, 0x2f, 0x77, 0x65, 0x65, 0x64, 0x2f, 0x70, 0x62, 0x2f, 0x6d,
+	0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
diff --git a/weed/replication/sink/filersink/fetch_write.go b/weed/replication/sink/filersink/fetch_write.go
index a7392d856..b5ea3e2cb 100644
--- a/weed/replication/sink/filersink/fetch_write.go
+++ b/weed/replication/sink/filersink/fetch_write.go
@@ -71,29 +71,30 @@ func (fs *FilerSink) fetchAndWrite(sourceChunk *filer_pb.FileChunk, path string)
 	var auth security.EncodedJwt
 
 	if err := fs.WithFilerClient(func(client filer_pb.SeaweedFilerClient) error {
+		return util.Retry("assignVolume", func() error {
+			request := &filer_pb.AssignVolumeRequest{
+				Count:       1,
+				Replication: fs.replication,
+				Collection:  fs.collection,
+				TtlSec:      fs.ttlSec,
+				DataCenter:  fs.dataCenter,
+				DiskType:    fs.diskType,
+				Path:        path,
+			}
 
-		request := &filer_pb.AssignVolumeRequest{
-			Count:       1,
-			Replication: fs.replication,
-			Collection:  fs.collection,
-			TtlSec:      fs.ttlSec,
-			DataCenter:  fs.dataCenter,
-			DiskType:    fs.diskType,
-			Path:        path,
-		}
+			resp, err := client.AssignVolume(context.Background(), request)
+			if err != nil {
+				glog.V(0).Infof("assign volume failure %v: %v", request, err)
+				return err
+			}
+			if resp.Error != "" {
+				return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+			}
 
-		resp, err := client.AssignVolume(context.Background(), request)
-		if err != nil {
-			glog.V(0).Infof("assign volume failure %v: %v", request, err)
-			return err
-		}
-		if resp.Error != "" {
-			return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
-		}
+			fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
 
-		fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
-
-		return nil
+			return nil
+		})
 	}); err != nil {
 		return "", fmt.Errorf("filerGrpcAddress assign volume: %v", err)
 	}
diff --git a/weed/s3api/auth_credentials.go b/weed/s3api/auth_credentials.go
index b8af6381a..d9d26756f 100644
--- a/weed/s3api/auth_credentials.go
+++ b/weed/s3api/auth_credentials.go
@@ -3,14 +3,14 @@ package s3api
 import (
 	"fmt"
 	"github.com/chrislusf/seaweedfs/weed/filer"
-	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
-	"io/ioutil"
-	"net/http"
-
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/iam_pb"
 	xhttp "github.com/chrislusf/seaweedfs/weed/s3api/http"
+	"github.com/chrislusf/seaweedfs/weed/s3api/s3_constants"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+	"io/ioutil"
+	"net/http"
+	"strings"
 )
 
 type Action string
@@ -255,11 +255,21 @@ func (identity *Identity) canDo(action Action, bucket string) bool {
 	limitedByBucket := string(action) + ":" + bucket
 	adminLimitedByBucket := s3_constants.ACTION_ADMIN + ":" + bucket
 	for _, a := range identity.Actions {
-		if string(a) == limitedByBucket {
-			return true
-		}
-		if string(a) == adminLimitedByBucket {
-			return true
+		act := string(a)
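+		// actions ending in "*" match by prefix, e.g. "Read:*" covers all buckets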
+		if strings.HasSuffix(act, "*") {
+			if strings.HasPrefix(limitedByBucket, act[:len(act)-1]) {
+				return true
+			}
+			if strings.HasPrefix(adminLimitedByBucket, act[:len(act)-1]) {
+				return true
+			}
+		} else {
+			if act == limitedByBucket {
+				return true
+			}
+			if act == adminLimitedByBucket {
+				return true
+			}
 		}
 	}
 	return false
diff --git a/weed/s3api/auth_signature_v4.go b/weed/s3api/auth_signature_v4.go
index 5ef7439c8..0df26e6fc 100644
--- a/weed/s3api/auth_signature_v4.go
+++ b/weed/s3api/auth_signature_v4.go
@@ -24,6 +24,7 @@ import (
 	"crypto/subtle"
 	"encoding/hex"
 	"github.com/chrislusf/seaweedfs/weed/s3api/s3err"
+	"io/ioutil"
 	"net/http"
 	"net/url"
 	"regexp"
@@ -132,6 +133,17 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r
 	// Query string.
 	queryStr := req.URL.Query().Encode()
 
+	// Get hashed Payload
+	if signV4Values.Credential.scope.service != "s3" && hashedPayload == emptySHA256 && r.Body != nil {
+		buf, _ := ioutil.ReadAll(r.Body)
+		// restore the body so it can still be read downstream
+		r.Body = ioutil.NopCloser(bytes.NewBuffer(buf))
+		if len(buf) != 0 {
+			bodyHash := sha256.Sum256(buf)
+			hashedPayload = hex.EncodeToString(bodyHash[:])
+		}
+	}
+
 	// Get canonical request.
 	canonicalRequest := getCanonicalRequest(extractedSignedHeaders, hashedPayload, queryStr, req.URL.Path, req.Method)
 
@@ -139,7 +151,10 @@ func (iam *IdentityAccessManagement) doesSignatureMatch(hashedPayload string, r
 	stringToSign := getStringToSign(canonicalRequest, t, signV4Values.Credential.getScope())
 
 	// Get hmac signing key.
-	signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, signV4Values.Credential.scope.region)
+	signingKey := getSigningKey(cred.SecretKey,
+		signV4Values.Credential.scope.date,
+		signV4Values.Credential.scope.region,
+		signV4Values.Credential.scope.service)
 
 	// Calculate signature.
 	newSignature := getSignature(signingKey, stringToSign)
@@ -310,7 +325,7 @@ func (iam *IdentityAccessManagement) doesPolicySignatureV4Match(formValues http.
 	}
 
 	// Get signing key.
-	signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region)
+	signingKey := getSigningKey(cred.SecretKey, credHeader.scope.date, credHeader.scope.region, credHeader.scope.service)
 
 	// Get signature.
 	newSignature := getSignature(signingKey, formValues.Get("Policy"))
@@ -427,7 +442,10 @@ func (iam *IdentityAccessManagement) doesPresignedSignatureMatch(hashedPayload s
 	presignedStringToSign := getStringToSign(presignedCanonicalReq, t, pSignValues.Credential.getScope())
 
 	// Get hmac presigned signing key.
-	presignedSigningKey := getSigningKey(cred.SecretKey, pSignValues.Credential.scope.date, pSignValues.Credential.scope.region)
+	presignedSigningKey := getSigningKey(cred.SecretKey,
+		pSignValues.Credential.scope.date,
+		pSignValues.Credential.scope.region,
+		pSignValues.Credential.scope.service)
 
 	// Get new signature.
 	newSignature := getSignature(presignedSigningKey, presignedStringToSign)
@@ -655,11 +673,11 @@ func sumHMAC(key []byte, data []byte) []byte {
 }
 
 // getSigningKey hmac seed to calculate final signature.
-func getSigningKey(secretKey string, t time.Time, region string) []byte {
+func getSigningKey(secretKey string, t time.Time, region string, service string) []byte {
 	date := sumHMAC([]byte("AWS4"+secretKey), []byte(t.Format(yyyymmdd)))
 	regionBytes := sumHMAC(date, []byte(region))
-	service := sumHMAC(regionBytes, []byte("s3"))
-	signingKey := sumHMAC(service, []byte("aws4_request"))
+	serviceBytes := sumHMAC(regionBytes, []byte(service))
+	signingKey := sumHMAC(serviceBytes, []byte("aws4_request"))
 	return signingKey
 }
 
diff --git a/weed/s3api/auto_signature_v4_test.go b/weed/s3api/auto_signature_v4_test.go
index 4c8255768..b47cd5f2d 100644
--- a/weed/s3api/auto_signature_v4_test.go
+++ b/weed/s3api/auto_signature_v4_test.go
@@ -370,7 +370,7 @@ func preSignV4(req *http.Request, accessKeyID, secretAccessKey string, expires i
 	queryStr := strings.Replace(query.Encode(), "+", "%20", -1)
 	canonicalRequest := getCanonicalRequest(extractedSignedHeaders, unsignedPayload, queryStr, req.URL.Path, req.Method)
 	stringToSign := getStringToSign(canonicalRequest, date, scope)
-	signingKey := getSigningKey(secretAccessKey, date, region)
+	signingKey := getSigningKey(secretAccessKey, date, region, "s3")
 	signature := getSignature(signingKey, stringToSign)
 
 	req.URL.RawQuery = query.Encode()
diff --git a/weed/s3api/chunked_reader_v4.go b/weed/s3api/chunked_reader_v4.go
index 734c9faee..b163ec2f6 100644
--- a/weed/s3api/chunked_reader_v4.go
+++ b/weed/s3api/chunked_reader_v4.go
@@ -45,7 +45,7 @@ func getChunkSignature(secretKey string, seedSignature string, region string, da
 		hashedChunk
 
 	// Get hmac signing key.
-	signingKey := getSigningKey(secretKey, date, region)
+	signingKey := getSigningKey(secretKey, date, region, "s3")
 
 	// Calculate signature.
 	newSignature := getSignature(signingKey, stringToSign)
@@ -117,7 +117,7 @@ func (iam *IdentityAccessManagement) calculateSeedSignature(r *http.Request) (cr
 	stringToSign := getStringToSign(canonicalRequest, date, signV4Values.Credential.getScope())
 
 	// Get hmac signing key.
-	signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region)
+	signingKey := getSigningKey(cred.SecretKey, signV4Values.Credential.scope.date, region, "s3")
 
 	// Calculate signature.
 	newSignature := getSignature(signingKey, stringToSign)
diff --git a/weed/s3api/s3api_object_handlers.go b/weed/s3api/s3api_object_handlers.go
index b3cfd9ec7..a8dc34b54 100644
--- a/weed/s3api/s3api_object_handlers.go
+++ b/weed/s3api/s3api_object_handlers.go
@@ -311,7 +311,7 @@ func (s3a *S3ApiServer) proxyToFiler(w http.ResponseWriter, r *http.Request, des
 	}
 	defer util.CloseResponse(resp)
 
-	if resp.ContentLength == -1 || resp.StatusCode == 404 {
+	if (resp.ContentLength == -1 || resp.StatusCode == 404) && resp.StatusCode != 304 {
 		if r.Method != "DELETE" {
 			writeErrorResponse(w, s3err.ErrNoSuchKey, r.URL)
 			return
@@ -326,11 +326,7 @@ func passThroughResponse(proxyResponse *http.Response, w http.ResponseWriter) {
 	for k, v := range proxyResponse.Header {
 		w.Header()[k] = v
 	}
-	if proxyResponse.Header.Get("Content-Range") != "" && proxyResponse.StatusCode == 200 {
-		w.WriteHeader(http.StatusPartialContent)
-	} else {
-		w.WriteHeader(proxyResponse.StatusCode)
-	}
+	w.WriteHeader(proxyResponse.StatusCode)
 	io.Copy(w, proxyResponse.Body)
 }
 
diff --git a/weed/s3api/s3api_objects_list_handlers.go b/weed/s3api/s3api_objects_list_handlers.go
index 739cdd8f9..66c66d280 100644
--- a/weed/s3api/s3api_objects_list_handlers.go
+++ b/weed/s3api/s3api_objects_list_handlers.go
@@ -63,6 +63,14 @@ func (s3a *S3ApiServer) ListObjectsV2Handler(w http.ResponseWriter, r *http.Requ
 		writeErrorResponse(w, s3err.ErrInternalError, r.URL)
 		return
 	}
+
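+	// an empty result may simply mean a missing bucket; verify it exists before returning an empty listing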
+	if len(response.Contents) == 0 {
+		if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
+			writeErrorResponse(w, s3err.ErrNoSuchBucket, r.URL)
+			return
+		}
+	}
+
 	responseV2 := &ListBucketResultV2{
 		XMLName:               response.XMLName,
 		Name:                  response.Name,
@@ -106,6 +114,13 @@ func (s3a *S3ApiServer) ListObjectsV1Handler(w http.ResponseWriter, r *http.Requ
 		return
 	}
 
+	if len(response.Contents) == 0 {
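+		// double-check that the bucket exists before reporting it as empty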
+		if exists, existErr := s3a.exists(s3a.option.BucketsPath, bucket, true); existErr == nil && !exists {
+			writeErrorResponse(w, s3err.ErrNoSuchBucket, r.URL)
+			return
+		}
+	}
+
 	writeSuccessResponseXML(w, encodeResponse(response))
 }
 
diff --git a/weed/server/common.go b/weed/server/common.go
index 5c5f1b8eb..9001a3b33 100644
--- a/weed/server/common.go
+++ b/weed/server/common.go
@@ -234,12 +234,12 @@ func adjustHeaderContentDisposition(w http.ResponseWriter, r *http.Request, file
 	}
 }
 
-func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64) error) {
+func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64, mimeType string, writeFn func(writer io.Writer, offset int64, size int64, httpStatusCode int) error) {
 	rangeReq := r.Header.Get("Range")
 
 	if rangeReq == "" {
 		w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
-		if err := writeFn(w, 0, totalSize); err != nil {
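+		// a zero status code tells the write function to leave the default 200 OK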
+		if err := writeFn(w, 0, totalSize, 0); err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
 		}
@@ -279,7 +279,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
 		w.Header().Set("Content-Length", strconv.FormatInt(ra.length, 10))
 		w.Header().Set("Content-Range", ra.contentRange(totalSize))
 
-		err = writeFn(w, ra.start, ra.length)
+		err = writeFn(w, ra.start, ra.length, http.StatusPartialContent)
 		if err != nil {
 			http.Error(w, err.Error(), http.StatusInternalServerError)
 			return
@@ -307,7 +307,7 @@ func processRangeRequest(r *http.Request, w http.ResponseWriter, totalSize int64
 				pw.CloseWithError(e)
 				return
 			}
-			if e = writeFn(part, ra.start, ra.length); e != nil {
+			if e = writeFn(part, ra.start, ra.length, 0); e != nil {
 				pw.CloseWithError(e)
 				return
 			}
diff --git a/weed/server/filer_grpc_server.go b/weed/server/filer_grpc_server.go
index a4bb721ef..3821de6a9 100644
--- a/weed/server/filer_grpc_server.go
+++ b/weed/server/filer_grpc_server.go
@@ -63,7 +63,7 @@ func (fs *FilerServer) ListEntries(req *filer_pb.ListEntriesRequest, stream file
 	var listErr error
 	for limit > 0 {
 		var hasEntries bool
-		lastFileName, listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", func(entry *filer.Entry) bool {
+		lastFileName, listErr = fs.filer.StreamListDirectoryEntries(stream.Context(), util.FullPath(req.Directory), lastFileName, includeLastFile, int64(paginationLimit), req.Prefix, "", "", func(entry *filer.Entry) bool {
 			hasEntries = true
 			if err = stream.Send(&filer_pb.ListEntriesResponse{
 				Entry: &filer_pb.Entry{
diff --git a/weed/server/filer_grpc_server_rename.go b/weed/server/filer_grpc_server_rename.go
index c1e5bc789..eadb970d5 100644
--- a/weed/server/filer_grpc_server_rename.go
+++ b/weed/server/filer_grpc_server_rename.go
@@ -74,7 +74,7 @@ func (fs *FilerServer) moveFolderSubEntries(ctx context.Context, oldParent util.
 	includeLastFile := false
 	for {
 
-		entries, hasMore, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "", "")
+		entries, hasMore, err := fs.filer.ListDirectoryEntries(ctx, currentDirPath, lastFileName, includeLastFile, 1024, "", "", "")
 		if err != nil {
 			return err
 		}
diff --git a/weed/server/filer_server_handlers_read.go b/weed/server/filer_server_handlers_read.go
index f90b070a2..1d90871d8 100644
--- a/weed/server/filer_server_handlers_read.go
+++ b/weed/server/filer_server_handlers_read.go
@@ -79,7 +79,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 		w.Header().Set("Last-Modified", entry.Attr.Mtime.UTC().Format(http.TimeFormat))
 		if r.Header.Get("If-Modified-Since") != "" {
 			if t, parseError := time.Parse(http.TimeFormat, r.Header.Get("If-Modified-Since")); parseError == nil {
-				if t.After(entry.Attr.Mtime) {
+				if !t.Before(entry.Attr.Mtime) {
 					w.WriteHeader(http.StatusNotModified)
 					return
 				}
@@ -131,9 +131,6 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 
 	if r.Method == "HEAD" {
 		w.Header().Set("Content-Length", strconv.FormatInt(totalSize, 10))
-		processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
-			return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, true)
-		})
 		return
 	}
 
@@ -153,7 +150,10 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 		}
 	}
 
-	processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+	processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
+		if httpStatusCode != 0 {
+			w.WriteHeader(httpStatusCode)
+		}
 		if offset+size <= int64(len(entry.Content)) {
 			_, err := writer.Write(entry.Content[offset : offset+size])
 			if err != nil {
@@ -161,7 +161,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request,
 			}
 			return err
 		}
-		return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size, false)
+		return filer.StreamContent(fs.filer.MasterClient, writer, entry.Chunks, offset, size)
 	})
 
 }
diff --git a/weed/server/filer_server_handlers_read_dir.go b/weed/server/filer_server_handlers_read_dir.go
index 9cf79ab41..307c411b6 100644
--- a/weed/server/filer_server_handlers_read_dir.go
+++ b/weed/server/filer_server_handlers_read_dir.go
@@ -35,8 +35,9 @@ func (fs *FilerServer) listDirectoryHandler(w http.ResponseWriter, r *http.Reque
 
 	lastFileName := r.FormValue("lastFileName")
 	namePattern := r.FormValue("namePattern")
+	namePatternExclude := r.FormValue("namePatternExclude")
 
-	entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern)
+	entries, shouldDisplayLoadMore, err := fs.filer.ListDirectoryEntries(context.Background(), util.FullPath(path), lastFileName, false, int64(limit), "", namePattern, namePatternExclude)
 
 	if err != nil {
 		glog.V(0).Infof("listDirectory %s %s %d: %s", path, lastFileName, limit, err)
diff --git a/weed/server/filer_server_handlers_tagging.go b/weed/server/filer_server_handlers_tagging.go
index 50b3a2c06..70b5327d6 100644
--- a/weed/server/filer_server_handlers_tagging.go
+++ b/weed/server/filer_server_handlers_tagging.go
@@ -78,11 +78,27 @@ func (fs *FilerServer) DeleteTaggingHandler(w http.ResponseWriter, r *http.Reque
 		existingEntry.Extended = make(map[string][]byte)
 	}
 
+	// parse out tags to be deleted
+	toDelete := strings.Split(r.URL.Query().Get("tagging"), ",")
+	deletions := make(map[string]struct{})
+	for _, deletion := range toDelete {
+		deletions[deletion] = struct{}{}
+	}
+
+	// delete all tags or specific tags
 	hasDeletion := false
 	for header, _ := range existingEntry.Extended {
 		if strings.HasPrefix(header, needle.PairNamePrefix) {
-			delete(existingEntry.Extended, header)
-			hasDeletion = true
+			if len(deletions) == 0 {
+				delete(existingEntry.Extended, header)
+				hasDeletion = true
+			} else {
+				tag := header[len(needle.PairNamePrefix):]
+				if _, found := deletions[tag]; found {
+					delete(existingEntry.Extended, header)
+					hasDeletion = true
+				}
+			}
 		}
 	}
 
diff --git a/weed/server/filer_server_handlers_write_autochunk.go b/weed/server/filer_server_handlers_write_autochunk.go
index 2808042c7..c4f10d94e 100644
--- a/weed/server/filer_server_handlers_write_autochunk.go
+++ b/weed/server/filer_server_handlers_write_autochunk.go
@@ -142,6 +142,14 @@ func (fs *FilerServer) saveMetaData(ctx context.Context, r *http.Request, fileNa
 		if fileName != "" {
 			path += fileName
 		}
+	} else {
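+		// when the target path is an existing directory, store the upload under path + "/" + fileName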
+		if fileName != "" {
+			if possibleDirEntry, findDirErr := fs.filer.FindEntry(ctx, util.FullPath(path)); findDirErr == nil {
+				if possibleDirEntry.IsDirectory() {
+					path += "/" + fileName
+				}
+			}
+		}
 	}
 
 	var entry *filer.Entry
diff --git a/weed/server/filer_server_handlers_write_upload.go b/weed/server/filer_server_handlers_write_upload.go
index 03db942c6..540def563 100644
--- a/weed/server/filer_server_handlers_write_upload.go
+++ b/weed/server/filer_server_handlers_write_upload.go
@@ -1,14 +1,13 @@
 package weed_server
 
 import (
+	"bytes"
 	"crypto/md5"
 	"hash"
 	"io"
 	"io/ioutil"
 	"net/http"
-	"runtime"
 	"strings"
-	"sync"
 	"time"
 
 	"github.com/chrislusf/seaweedfs/weed/filer"
@@ -20,136 +19,82 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
-var (
-	limitedUploadProcessor = util.NewLimitedOutOfOrderProcessor(int32(runtime.NumCPU()))
-)
+func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) ([]*filer_pb.FileChunk, hash.Hash, int64, error, []byte) {
+	var fileChunks []*filer_pb.FileChunk
 
-func (fs *FilerServer) uploadReaderToChunks(w http.ResponseWriter, r *http.Request, reader io.Reader, chunkSize int32, fileName, contentType string, contentLength int64, so *operation.StorageOption) (fileChunks []*filer_pb.FileChunk, md5Hash hash.Hash, dataSize int64, err error, smallContent []byte) {
-
-	md5Hash = md5.New()
+	md5Hash := md5.New()
 	var partReader = ioutil.NopCloser(io.TeeReader(reader, md5Hash))
 
-	// save small content directly
-	if !isAppend(r) && ((0 < contentLength && contentLength < fs.option.SaveToFilerLimit) || strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && contentLength < 4*1024) {
-		smallContent, err = ioutil.ReadAll(partReader)
-		dataSize = int64(len(smallContent))
-		return
-	}
+	chunkOffset := int64(0)
+	var smallContent []byte
 
-	resultsChan := make(chan *ChunkCreationResult, 2)
+	for {
+		limitedReader := io.LimitReader(partReader, int64(chunkSize))
 
-	var waitForAllData sync.WaitGroup
-	waitForAllData.Add(1)
-	go func() {
-		// process upload results
-		defer waitForAllData.Done()
-		for result := range resultsChan {
-			if result.err != nil {
-				err = result.err
+		data, err := ioutil.ReadAll(limitedReader)
+		if err != nil {
+			return nil, nil, 0, err, nil
+		}
+		if chunkOffset == 0 && !isAppend(r) {
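+			// save small content directly in the filer entry instead of uploading a separate chunk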
+			if len(data) < int(fs.option.SaveToFilerLimit) || (strings.HasPrefix(r.URL.Path, filer.DirectoryEtcRoot) && len(data) < 4*1024) {
+				smallContent = data
+				chunkOffset += int64(len(data))
+				break
+			}
+		}
+		dataReader := util.NewBytesReader(data)
+
+		// retry to assign a different file id
+		var fileId, urlLocation string
+		var auth security.EncodedJwt
+		var assignErr, uploadErr error
+		var uploadResult *operation.UploadResult
+		for i := 0; i < 3; i++ {
+			// assign one file id for one chunk
+			fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
+			if assignErr != nil {
+				return nil, nil, 0, assignErr, nil
+			}
+
+			// upload the chunk to the volume server
+			uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
+			if uploadErr != nil {
+				time.Sleep(251 * time.Millisecond)
 				continue
 			}
-
-			// Save to chunk manifest structure
-			fileChunks = append(fileChunks, result.chunk)
+			break
 		}
-	}()
-
-	var lock sync.Mutex
-	readOffset := int64(0)
-	var wg sync.WaitGroup
-
-	for err == nil {
-
-		wg.Add(1)
-		request := func() {
-			defer wg.Done()
-
-			var localOffset int64
-			// read from the input
-			lock.Lock()
-			localOffset = readOffset
-			limitedReader := io.LimitReader(partReader, int64(chunkSize))
-			data, readErr := ioutil.ReadAll(limitedReader)
-			readOffset += int64(len(data))
-			lock.Unlock()
-			// handle read errors
-			if readErr != nil {
-				if readErr != io.EOF {
-					if err == nil {
-						err = readErr
-					}
-					resultsChan <- &ChunkCreationResult{
-						err: readErr,
-					}
-				}
-				return
-			}
-			if len(data) == 0 {
-				readErr = io.EOF
-				return
-			}
-
-			// upload
-			dataReader := util.NewBytesReader(data)
-			fileId, uploadResult, uploadErr := fs.doCreateChunk(w, r, so, dataReader, fileName, contentType)
-			if uploadErr != nil {
-				if err == nil {
-					err = uploadErr
-				}
-				resultsChan <- &ChunkCreationResult{
-					err: uploadErr,
-				}
-				return
-			}
-
-			glog.V(4).Infof("uploaded %s to %s [%d,%d)", fileName, fileId, localOffset, localOffset+int64(uploadResult.Size))
-
-			// send back uploaded file chunk
-			resultsChan <- &ChunkCreationResult{
-				chunk: uploadResult.ToPbFileChunk(fileId, localOffset),
-			}
-
-		}
-		limitedUploadProcessor.Execute(request)
-	}
-
-	go func() {
-		wg.Wait()
-		close(resultsChan)
-	}()
-
-	waitForAllData.Wait()
-
-	return fileChunks, md5Hash, readOffset, err, nil
-}
-
-type ChunkCreationResult struct {
-	chunk *filer_pb.FileChunk
-	err   error
-}
-
-func (fs *FilerServer) doCreateChunk(w http.ResponseWriter, r *http.Request, so *operation.StorageOption, dataReader *util.BytesReader, fileName string, contentType string) (string, *operation.UploadResult, error) {
-	// retry to assign a different file id
-	var fileId, urlLocation string
-	var auth security.EncodedJwt
-	var assignErr, uploadErr error
-	var uploadResult *operation.UploadResult
-	for i := 0; i < 3; i++ {
-		// assign one file id for one chunk
-		fileId, urlLocation, auth, assignErr = fs.assignNewFileInfo(so)
-		if assignErr != nil {
-			return "", nil, assignErr
-		}
-
-		// upload the chunk to the volume server
-		uploadResult, uploadErr, _ = fs.doUpload(urlLocation, w, r, dataReader, fileName, contentType, nil, auth)
 		if uploadErr != nil {
-			time.Sleep(251 * time.Millisecond)
-			continue
+			return nil, nil, 0, uploadErr, nil
+		}
+
+		// stop if the previous chunk exhausted the reader exactly at a chunk boundary
+		if uploadResult.Size == 0 {
+			break
+		}
+		if chunkOffset == 0 {
+			uploadedMd5 := util.Base64Md5ToBytes(uploadResult.ContentMd5)
+			readMd5 := md5Hash.Sum(nil)
+			if !bytes.Equal(uploadedMd5, readMd5) {
+				glog.Errorf("md5 %x does not match %x for uploaded chunk %s on the volume server", readMd5, uploadedMd5, uploadResult.Name)
+			}
+		}
+
+		// Save to chunk manifest structure
+		fileChunks = append(fileChunks, uploadResult.ToPbFileChunk(fileId, chunkOffset))
+
+		glog.V(4).Infof("uploaded %s chunk %d to %s [%d,%d)", fileName, len(fileChunks), fileId, chunkOffset, chunkOffset+int64(uploadResult.Size))
+
+		// reset variables for the next chunk
+		chunkOffset = chunkOffset + int64(uploadResult.Size)
+
+		// a chunk shorter than chunkSize means the reader is exhausted
+		if int64(uploadResult.Size) < int64(chunkSize) {
+			break
 		}
-		break
 	}
-	return fileId, uploadResult, uploadErr
+
+	return fileChunks, md5Hash, chunkOffset, nil, smallContent
 }
 
 func (fs *FilerServer) doUpload(urlLocation string, w http.ResponseWriter, r *http.Request, limitedReader io.Reader, fileName string, contentType string, pairMap map[string]string, auth security.EncodedJwt) (*operation.UploadResult, error, []byte) {
diff --git a/weed/server/filer_ui/breadcrumb.go b/weed/server/filer_ui/breadcrumb.go
index f21cce7d1..5016117a8 100644
--- a/weed/server/filer_ui/breadcrumb.go
+++ b/weed/server/filer_ui/breadcrumb.go
@@ -1,4 +1,4 @@
-package master_ui
+package filer_ui
 
 import (
 	"strings"
diff --git a/weed/server/filer_ui/templates.go b/weed/server/filer_ui/templates.go
index 3f0647119..648b97f22 100644
--- a/weed/server/filer_ui/templates.go
+++ b/weed/server/filer_ui/templates.go
@@ -1,4 +1,4 @@
-package master_ui
+package filer_ui
 
 import (
 	"github.com/dustin/go-humanize"
diff --git a/weed/server/gateway_server.go b/weed/server/gateway_server.go
new file mode 100644
index 000000000..608217ed7
--- /dev/null
+++ b/weed/server/gateway_server.go
@@ -0,0 +1,106 @@
+package weed_server
+
+import (
+	"github.com/chrislusf/seaweedfs/weed/operation"
+	"google.golang.org/grpc"
+	"math/rand"
+	"net/http"
+
+	"github.com/chrislusf/seaweedfs/weed/util"
+
+	_ "github.com/chrislusf/seaweedfs/weed/filer/cassandra"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/elastic/v7"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/etcd"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/hbase"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb2"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/leveldb3"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/mongodb"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/mysql"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/mysql2"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/postgres"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/postgres2"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/redis"
+	_ "github.com/chrislusf/seaweedfs/weed/filer/redis2"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	_ "github.com/chrislusf/seaweedfs/weed/notification/aws_sqs"
+	_ "github.com/chrislusf/seaweedfs/weed/notification/gocdk_pub_sub"
+	_ "github.com/chrislusf/seaweedfs/weed/notification/google_pub_sub"
+	_ "github.com/chrislusf/seaweedfs/weed/notification/kafka"
+	_ "github.com/chrislusf/seaweedfs/weed/notification/log"
+	"github.com/chrislusf/seaweedfs/weed/security"
+)
+
+type GatewayOption struct {
+	Masters  []string
+	Filers   []string
+	MaxMB    int
+	IsSecure bool
+}
+
+type GatewayServer struct {
+	option         *GatewayOption
+	secret         security.SigningKey
+	grpcDialOption grpc.DialOption
+}
+
+func NewGatewayServer(defaultMux *http.ServeMux, option *GatewayOption) (fs *GatewayServer, err error) {
+
+	fs = &GatewayServer{
+		option:         option,
+		grpcDialOption: security.LoadClientTLS(util.GetViper(), "grpc.client"),
+	}
+
+	if len(option.Masters) == 0 {
+		glog.Fatal("master list is required!")
+	}
+
+	defaultMux.HandleFunc("/blobs/", fs.blobsHandler)
+	defaultMux.HandleFunc("/files/", fs.filesHandler)
+	defaultMux.HandleFunc("/topics/", fs.topicsHandler)
+
+	return fs, nil
+}
+
+func (fs *GatewayServer) getMaster() string {
+	randMaster := rand.Intn(len(fs.option.Masters))
+	return fs.option.Masters[randMaster]
+}
+
+func (fs *GatewayServer) blobsHandler(w http.ResponseWriter, r *http.Request) {
+	switch r.Method {
+	case "DELETE":
+		chunkId := r.URL.Path[len("/blobs/"):]
+		fullUrl, err := operation.LookupFileId(fs.getMaster, chunkId)
+		if err != nil {
+			writeJsonError(w, r, http.StatusNotFound, err)
+			return
+		}
+		var jwtAuthorization security.EncodedJwt
+		if fs.option.IsSecure {
+			jwtAuthorization = operation.LookupJwt(fs.getMaster(), chunkId)
+		}
+		body, statusCode, err := util.DeleteProxied(fullUrl, string(jwtAuthorization))
+		if err != nil {
+			writeJsonError(w, r, http.StatusNotFound, err)
+			return
+		}
+		w.WriteHeader(statusCode)
+		w.Write(body)
+	case "POST":
+		submitForClientHandler(w, r, fs.getMaster, fs.grpcDialOption)
+	}
+}
+
+func (fs *GatewayServer) filesHandler(w http.ResponseWriter, r *http.Request) {
+	switch r.Method {
+	case "DELETE":
+	case "POST":
+	}
+}
+
+func (fs *GatewayServer) topicsHandler(w http.ResponseWriter, r *http.Request) {
+	switch r.Method {
+	case "POST":
+	}
+}
diff --git a/weed/server/master_grpc_server_admin.go b/weed/server/master_grpc_server_admin.go
index 7e7dcb36b..93c9e4e4e 100644
--- a/weed/server/master_grpc_server_admin.go
+++ b/weed/server/master_grpc_server_admin.go
@@ -3,6 +3,7 @@ package weed_server
 import (
 	"context"
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/glog"
 	"math/rand"
 	"sync"
 	"time"
@@ -60,6 +61,7 @@ const (
 type AdminLock struct {
 	accessSecret   int64
 	accessLockTime time.Time
+	lastClient     string
 }
 
 type AdminLocks struct {
@@ -73,14 +75,15 @@ func NewAdminLocks() *AdminLocks {
 	}
 }
 
-func (locks *AdminLocks) isLocked(lockName string) bool {
+func (locks *AdminLocks) isLocked(lockName string) (clientName string, isLocked bool) {
 	locks.RLock()
 	defer locks.RUnlock()
 	adminLock, found := locks.locks[lockName]
 	if !found {
-		return false
+		return "", false
 	}
-	return adminLock.accessLockTime.Add(LockDuration).After(time.Now())
+	glog.V(4).Infof("isLocked %v", adminLock.lastClient)
+	return adminLock.lastClient, adminLock.accessLockTime.Add(LockDuration).After(time.Now())
 }
 
 func (locks *AdminLocks) isValidToken(lockName string, ts time.Time, token int64) bool {
@@ -93,12 +96,13 @@ func (locks *AdminLocks) isValidToken(lockName string, ts time.Time, token int64
 	return adminLock.accessLockTime.Equal(ts) && adminLock.accessSecret == token
 }
 
-func (locks *AdminLocks) generateToken(lockName string) (ts time.Time, token int64) {
+func (locks *AdminLocks) generateToken(lockName string, clientName string) (ts time.Time, token int64) {
 	locks.Lock()
 	defer locks.Unlock()
 	lock := &AdminLock{
 		accessSecret:   rand.Int63(),
 		accessLockTime: time.Now(),
+		lastClient:     clientName,
 	}
 	locks.locks[lockName] = lock
 	return lock.accessLockTime, lock.accessSecret
@@ -113,18 +117,19 @@ func (locks *AdminLocks) deleteLock(lockName string) {
 func (ms *MasterServer) LeaseAdminToken(ctx context.Context, req *master_pb.LeaseAdminTokenRequest) (*master_pb.LeaseAdminTokenResponse, error) {
 	resp := &master_pb.LeaseAdminTokenResponse{}
 
-	if ms.adminLocks.isLocked(req.LockName) {
+	if lastClient, isLocked := ms.adminLocks.isLocked(req.LockName); isLocked {
+		glog.V(4).Infof("LeaseAdminToken %v", lastClient)
 		if req.PreviousToken != 0 && ms.adminLocks.isValidToken(req.LockName, time.Unix(0, req.PreviousLockTime), req.PreviousToken) {
 			// for renew
-			ts, token := ms.adminLocks.generateToken(req.LockName)
+			ts, token := ms.adminLocks.generateToken(req.LockName, req.ClientName)
 			resp.Token, resp.LockTsNs = token, ts.UnixNano()
 			return resp, nil
 		}
 		// refuse since still locked
-		return resp, fmt.Errorf("already locked")
+		return resp, fmt.Errorf("already locked by " + lastClient)
 	}
 	// for fresh lease request
-	ts, token := ms.adminLocks.generateToken(req.LockName)
+	ts, token := ms.adminLocks.generateToken(req.LockName, req.ClientName)
 	resp.Token, resp.LockTsNs = token, ts.UnixNano()
 	return resp, nil
 }
diff --git a/weed/server/master_grpc_server_volume.go b/weed/server/master_grpc_server_volume.go
index 156afd4a1..3a4951cc5 100644
--- a/weed/server/master_grpc_server_volume.go
+++ b/weed/server/master_grpc_server_volume.go
@@ -4,15 +4,68 @@ import (
 	"context"
 	"fmt"
 	"github.com/chrislusf/raft"
-	"github.com/chrislusf/seaweedfs/weed/storage/types"
+	"reflect"
+	"sync"
+	"time"
 
+	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/pb/master_pb"
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/chrislusf/seaweedfs/weed/topology"
 )
 
+func (ms *MasterServer) ProcessGrowRequest() {
+	go func() {
+		filter := sync.Map{}
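+		// filter keeps track of grow requests that are already in flight so duplicates are dropped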
+		for {
+			req, ok := <-ms.vgCh
+			if !ok {
+				break
+			}
+
+			if !ms.Topo.IsLeader() {
+				//discard buffered requests
+				time.Sleep(time.Second * 1)
+				continue
+			}
+
+			// filter out identical requests being processed
+			found := false
+			filter.Range(func(k, v interface{}) bool {
+				if reflect.DeepEqual(k, req) {
+					found = true
+				}
+				return !found
+			})
+
+			// the check-then-store below is not atomic, but an occasional duplicate grow request is acceptable
+			if !found && ms.shouldVolumeGrow(req.Option) {
+				filter.Store(req, nil)
+				// we have lock called inside vg
+				go func() {
+					glog.V(1).Infoln("starting automatic volume grow")
+					start := time.Now()
+					_, err := ms.vg.AutomaticGrowByType(req.Option, ms.grpcDialOption, ms.Topo, req.Count)
+					glog.V(1).Infoln("finished automatic volume grow, cost ", time.Now().Sub(start))
+
+					if req.ErrCh != nil {
+						req.ErrCh <- err
+						close(req.ErrCh)
+					}
+
+					filter.Delete(req)
+				}()
+
+			} else {
+				glog.V(4).Infoln("discard volume grow request")
+			}
+		}
+	}()
+}
+
 func (ms *MasterServer) LookupVolume(ctx context.Context, req *master_pb.LookupVolumeRequest) (*master_pb.LookupVolumeResponse, error) {
 
 	if !ms.Topo.IsLeader() {
@@ -68,38 +121,45 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
 		ReplicaPlacement:   replicaPlacement,
 		Ttl:                ttl,
 		DiskType:           diskType,
-		Prealloacte:        ms.preallocateSize,
+		Preallocate:        ms.preallocateSize,
 		DataCenter:         req.DataCenter,
 		Rack:               req.Rack,
 		DataNode:           req.DataNode,
 		MemoryMapMaxSizeMb: req.MemoryMapMaxSizeMb,
 	}
 
-	if !ms.Topo.HasWritableVolume(option) {
+	if ms.shouldVolumeGrow(option) {
 		if ms.Topo.AvailableSpaceFor(option) <= 0 {
 			return nil, fmt.Errorf("no free volumes left for " + option.String())
 		}
-		ms.vgLock.Lock()
-		if !ms.Topo.HasWritableVolume(option) {
-			if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo, int(req.WritableVolumeCount)); err != nil {
-				ms.vgLock.Unlock()
-				return nil, fmt.Errorf("Cannot grow volume group! %v", err)
-			}
+		ms.vgCh <- &topology.VolumeGrowRequest{
+			Option: option,
+			Count:  int(req.WritableVolumeCount),
 		}
-		ms.vgLock.Unlock()
-	}
-	fid, count, dn, err := ms.Topo.PickForWrite(req.Count, option)
-	if err != nil {
-		return nil, fmt.Errorf("%v", err)
 	}
 
-	return &master_pb.AssignResponse{
-		Fid:       fid,
-		Url:       dn.Url(),
-		PublicUrl: dn.PublicUrl,
-		Count:     count,
-		Auth:      string(security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fid)),
-	}, nil
+	var (
+		lastErr    error
+		maxTimeout = time.Second * 10
+		startTime  = time.Now()
+	)
+
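+	// wait up to maxTimeout for the grow request to produce a writable volume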
+	for time.Since(startTime) < maxTimeout {
+		fid, count, dn, err := ms.Topo.PickForWrite(req.Count, option)
+		if err == nil {
+			return &master_pb.AssignResponse{
+				Fid:       fid,
+				Url:       dn.Url(),
+				PublicUrl: dn.PublicUrl,
+				Count:     count,
+				Auth:      string(security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fid)),
+			}, nil
+		}
+		//glog.V(4).Infoln("waiting for volume growing...")
+		lastErr = err
+		time.Sleep(200 * time.Millisecond)
+	}
+	return nil, lastErr
 }
 
 func (ms *MasterServer) Statistics(ctx context.Context, req *master_pb.StatisticsRequest) (*master_pb.StatisticsResponse, error) {
diff --git a/weed/server/master_server.go b/weed/server/master_server.go
index e2b2df18d..838803908 100644
--- a/weed/server/master_server.go
+++ b/weed/server/master_server.go
@@ -51,9 +51,9 @@ type MasterServer struct {
 
 	preallocateSize int64
 
-	Topo   *topology.Topology
-	vg     *topology.VolumeGrowth
-	vgLock sync.Mutex
+	Topo *topology.Topology
+	vg   *topology.VolumeGrowth
+	vgCh chan *topology.VolumeGrowRequest
 
 	boundedLeaderChan chan int
 
@@ -82,6 +82,12 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
 	v.SetDefault("master.replication.treat_replication_as_minimums", false)
 	replicationAsMin := v.GetBool("master.replication.treat_replication_as_minimums")
 
+	v.SetDefault("master.volume_growth.copy_1", 7)
+	v.SetDefault("master.volume_growth.copy_2", 6)
+	v.SetDefault("master.volume_growth.copy_3", 3)
+	v.SetDefault("master.volume_growth.copy_other", 1)
+	v.SetDefault("master.volume_growth.threshold", 0.9)
+
 	var preallocateSize int64
 	if option.VolumePreallocate {
 		preallocateSize = int64(option.VolumeSizeLimitMB) * (1 << 20)
@@ -91,6 +97,7 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
 	ms := &MasterServer{
 		option:          option,
 		preallocateSize: preallocateSize,
+		vgCh:            make(chan *topology.VolumeGrowRequest, 1 << 6),
 		clientChans:     make(map[string]chan *master_pb.VolumeLocation),
 		grpcDialOption:  grpcDialOption,
 		MasterClient:    wdclient.NewMasterClient(grpcDialOption, "master", option.Host, 0, "", peers),
@@ -128,7 +135,14 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers []string) *Maste
 		r.HandleFunc("/{fileId}", ms.redirectHandler)
 	}
 
-	ms.Topo.StartRefreshWritableVolumes(ms.grpcDialOption, ms.option.GarbageThreshold, ms.preallocateSize)
+	ms.Topo.StartRefreshWritableVolumes(
+		ms.grpcDialOption,
+		ms.option.GarbageThreshold,
+		v.GetFloat64("master.volume_growth.threshold"),
+		ms.preallocateSize,
+	)
+
+	ms.ProcessGrowRequest()
 
 	ms.startAdminScripts()
 
diff --git a/weed/server/master_server_handlers.go b/weed/server/master_server_handlers.go
index a9fecc5bd..974b3308f 100644
--- a/weed/server/master_server_handlers.go
+++ b/weed/server/master_server_handlers.go
@@ -10,6 +10,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/security"
 	"github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/topology"
 )
 
 func (ms *MasterServer) lookupVolumeId(vids []string, collection string) (volumeLocations map[string]operation.LookupResult) {
@@ -111,19 +112,20 @@ func (ms *MasterServer) dirAssignHandler(w http.ResponseWriter, r *http.Request)
 		return
 	}
 
-	if !ms.Topo.HasWritableVolume(option) {
+	if ms.shouldVolumeGrow(option) {
 		if ms.Topo.AvailableSpaceFor(option) <= 0 {
 			writeJsonQuiet(w, r, http.StatusNotFound, operation.AssignResult{Error: "No free volumes left for " + option.String()})
 			return
 		}
-		ms.vgLock.Lock()
-		defer ms.vgLock.Unlock()
-		if !ms.Topo.HasWritableVolume(option) {
-			if _, err = ms.vg.AutomaticGrowByType(option, ms.grpcDialOption, ms.Topo, writableVolumeCount); err != nil {
-				writeJsonError(w, r, http.StatusInternalServerError,
-					fmt.Errorf("Cannot grow volume group! %v", err))
-				return
-			}
+		errCh := make(chan error, 1)
+		ms.vgCh <- &topology.VolumeGrowRequest{
+			Option: option,
+			Count:  writableVolumeCount,
+			ErrCh:  errCh,
+		}
+		if err := <-errCh; err != nil {
+			writeJsonError(w, r, http.StatusInternalServerError, fmt.Errorf("cannot grow volume group! %v", err))
+			return
 		}
 	}
 	fid, count, dn, err := ms.Topo.PickForWrite(requestedCount, option)
diff --git a/weed/server/master_server_handlers_admin.go b/weed/server/master_server_handlers_admin.go
index f24d4e924..fb16ef78c 100644
--- a/weed/server/master_server_handlers_admin.go
+++ b/weed/server/master_server_handlers_admin.go
@@ -3,7 +3,6 @@ package weed_server
 import (
 	"context"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"math/rand"
 	"net/http"
 	"strconv"
@@ -14,6 +13,7 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage/backend/memory_map"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"github.com/chrislusf/seaweedfs/weed/topology"
 	"github.com/chrislusf/seaweedfs/weed/util"
 )
@@ -136,9 +136,11 @@ func (ms *MasterServer) submitFromMasterServerHandler(w http.ResponseWriter, r *
 	}
 }
 
-func (ms *MasterServer) HasWritableVolume(option *topology.VolumeGrowOption) bool {
+func (ms *MasterServer) shouldVolumeGrow(option *topology.VolumeGrowOption) bool {
 	vl := ms.Topo.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)
-	return vl.GetActiveVolumeCount(option) > 0
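+	// grow when the crowded (high-usage) volumes have caught up with the active writable ones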
+	active, high := vl.GetActiveVolumeCount(option)
+	//glog.V(0).Infof("active volume: %d, high usage volume: %d\n", active, high)
+	return active <= high
 }
 
 func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGrowOption, error) {
@@ -172,7 +174,7 @@ func (ms *MasterServer) getVolumeGrowOption(r *http.Request) (*topology.VolumeGr
 		ReplicaPlacement:   replicaPlacement,
 		Ttl:                ttl,
 		DiskType:           diskType,
-		Prealloacte:        preallocate,
+		Preallocate:        preallocate,
 		DataCenter:         r.FormValue("dataCenter"),
 		Rack:               r.FormValue("rack"),
 		DataNode:           r.FormValue("dataNode"),
diff --git a/weed/server/master_server_handlers_ui.go b/weed/server/master_server_handlers_ui.go
index 9cd58158b..3822c6113 100644
--- a/weed/server/master_server_handlers_ui.go
+++ b/weed/server/master_server_handlers_ui.go
@@ -14,17 +14,19 @@ func (ms *MasterServer) uiStatusHandler(w http.ResponseWriter, r *http.Request)
 	infos := make(map[string]interface{})
 	infos["Up Time"] = time.Now().Sub(startTime).String()
 	args := struct {
-		Version    string
-		Topology   interface{}
-		RaftServer raft.Server
-		Stats      map[string]interface{}
-		Counters   *stats.ServerStats
+		Version           string
+		Topology          interface{}
+		RaftServer        raft.Server
+		Stats             map[string]interface{}
+		Counters          *stats.ServerStats
+		VolumeSizeLimitMB uint
 	}{
 		util.Version(),
 		ms.Topo.ToMap(),
 		ms.Topo.RaftServer,
 		infos,
 		serverStats,
+		ms.option.VolumeSizeLimitMB,
 	}
 	ui.StatusTpl.Execute(w, args)
 }
diff --git a/weed/server/master_ui/templates.go b/weed/server/master_ui/templates.go
index 60873f6aa..31b6353e9 100644
--- a/weed/server/master_ui/templates.go
+++ b/weed/server/master_ui/templates.go
@@ -22,8 +22,12 @@ var StatusTpl = template.Must(template.New("status").Parse(`<!DOCTYPE html>
       <div class="row">
         <div class="col-sm-6">
           <h2>Cluster status</h2>
-          <table class="table">
+          <table class="table table-condensed table-striped">
             <tbody>
+              <tr>
+                <th>Volume Size Limit</th>
+                <td>{{ .VolumeSizeLimitMB }}MB</td>
+              </tr>
               <tr>
                 <th>Free</th>
                 <td>{{ .Topology.Free }}</td>
@@ -38,8 +42,8 @@ var StatusTpl = template.Must(template.New("status").Parse(`<!DOCTYPE html>
                 <td><a href="http://{{ .Leader }}">{{ .Leader }}</a></td>
               </tr>
               <tr>
-                <td class="col-sm-2 field-label"><label>Other Masters:</label></td>
-                <td class="col-sm-10"><ul class="list-unstyled">
+                <th>Other Masters</th>
+                <td class="col-sm-5"><ul class="list-unstyled">
                 {{ range $k, $p := .Peers }}
                   <li><a href="http://{{ $p.Name }}/ui/index.html">{{ $p.Name }}</a></li>
                 {{ end }}
diff --git a/weed/server/volume_grpc_vacuum.go b/weed/server/volume_grpc_vacuum.go
index b87de4b5b..f8d1b7fda 100644
--- a/weed/server/volume_grpc_vacuum.go
+++ b/weed/server/volume_grpc_vacuum.go
@@ -44,19 +44,14 @@ func (vs *VolumeServer) VacuumVolumeCommit(ctx context.Context, req *volume_serv
 
 	resp := &volume_server_pb.VacuumVolumeCommitResponse{}
 
-	err := vs.store.CommitCompactVolume(needle.VolumeId(req.VolumeId))
+	readOnly, err := vs.store.CommitCompactVolume(needle.VolumeId(req.VolumeId))
 
 	if err != nil {
 		glog.Errorf("commit volume %d: %v", req.VolumeId, err)
 	} else {
 		glog.V(1).Infof("commit volume %d", req.VolumeId)
 	}
-	if err == nil {
-		if vs.store.GetVolume(needle.VolumeId(req.VolumeId)).IsReadOnly() {
-			resp.IsReadOnly = true
-		}
-	}
-
+	resp.IsReadOnly = readOnly
 	return resp, err
 
 }
diff --git a/weed/server/volume_server.go b/weed/server/volume_server.go
index e11d607a4..f7359ea6b 100644
--- a/weed/server/volume_server.go
+++ b/weed/server/volume_server.go
@@ -43,7 +43,7 @@ type VolumeServer struct {
 
 func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 	port int, publicUrl string,
-	folders []string, maxCounts []int, minFreeSpacePercents []float32, diskTypes []types.DiskType,
+	folders []string, maxCounts []int, minFreeSpaces []util.MinFreeSpace, diskTypes []types.DiskType,
 	idxFolder string,
 	needleMapKind storage.NeedleMapKind,
 	masterNodes []string, pulseSeconds int,
@@ -85,7 +85,7 @@ func NewVolumeServer(adminMux, publicMux *http.ServeMux, ip string,
 
 	vs.checkWithMaster()
 
-	vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, minFreeSpacePercents, idxFolder, vs.needleMapKind, diskTypes)
+	vs.store = storage.NewStore(vs.grpcDialOption, port, ip, publicUrl, folders, maxCounts, minFreeSpaces, idxFolder, vs.needleMapKind, diskTypes)
 	vs.guard = security.NewGuard(whiteList, signingKey, expiresAfterSec, readSigningKey, readExpiresAfterSec)
 
 	handleStaticResources(adminMux)
diff --git a/weed/server/volume_server_handlers_read.go b/weed/server/volume_server_handlers_read.go
index 3e977cfd4..2db46ac9b 100644
--- a/weed/server/volume_server_handlers_read.go
+++ b/weed/server/volume_server_handlers_read.go
@@ -27,7 +27,7 @@ var fileNameEscaper = strings.NewReplacer(`\`, `\\`, `"`, `\"`)
 
 func (vs *VolumeServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request) {
 
-	glog.V(9).Info(r.Method + " " + r.URL.Path + " " + r.Header.Get("Range"))
+	// println(r.Method + " " + r.URL.Path)
 
 	stats.VolumeServerRequestCounter.WithLabelValues("get").Inc()
 	start := time.Now()
@@ -261,10 +261,13 @@ func writeResponseContent(filename, mimeType string, rs io.ReadSeeker, w http.Re
 		return nil
 	}
 
-	processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64) error {
+	processRangeRequest(r, w, totalSize, mimeType, func(writer io.Writer, offset int64, size int64, httpStatusCode int) error {
 		if _, e = rs.Seek(offset, 0); e != nil {
 			return e
 		}
+		if httpStatusCode != 0 {
+			w.WriteHeader(httpStatusCode)
+		}
 		_, e = io.CopyN(writer, rs, size)
 		return e
 	})
diff --git a/weed/server/volume_server_handlers_write.go b/weed/server/volume_server_handlers_write.go
index 602b147e1..3d752eda6 100644
--- a/weed/server/volume_server_handlers_write.go
+++ b/weed/server/volume_server_handlers_write.go
@@ -13,7 +13,6 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/stats"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 	"github.com/chrislusf/seaweedfs/weed/topology"
-	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
@@ -68,7 +67,7 @@ func (vs *VolumeServer) PostHandler(w http.ResponseWriter, r *http.Request) {
 		ret.Name = string(reqNeedle.Name)
 	}
 	ret.Size = uint32(originalSize)
-	ret.ETag = fmt.Sprintf("%x", util.Base64Md5ToBytes(contentMd5))
+	ret.ETag = reqNeedle.Etag()
 	ret.Mime = string(reqNeedle.Mime)
 	setEtag(w, ret.ETag)
 	w.Header().Set("Content-MD5", contentMd5)
diff --git a/weed/server/volume_server_ui/templates.go b/weed/server/volume_server_ui/templates.go
index 6a8bb6f55..ee4c2e31d 100644
--- a/weed/server/volume_server_ui/templates.go
+++ b/weed/server/volume_server_ui/templates.go
@@ -1,4 +1,4 @@
-package master_ui
+package volume_server_ui
 
 import (
 	"fmt"
diff --git a/weed/server/webdav_server.go b/weed/server/webdav_server.go
index c3f68fdee..c6550a36f 100644
--- a/weed/server/webdav_server.go
+++ b/weed/server/webdav_server.go
@@ -380,25 +380,32 @@ func (f *WebDavFile) saveDataAsChunk(reader io.Reader, name string, offset int64
 
 		ctx := context.Background()
 
-		request := &filer_pb.AssignVolumeRequest{
-			Count:       1,
-			Replication: f.fs.option.Replication,
-			Collection:  f.fs.option.Collection,
-			DiskType:    f.fs.option.DiskType,
-			Path:        name,
-		}
+		assignErr := util.Retry("assignVolume", func() error {
+			request := &filer_pb.AssignVolumeRequest{
+				Count:       1,
+				Replication: f.fs.option.Replication,
+				Collection:  f.fs.option.Collection,
+				DiskType:    f.fs.option.DiskType,
+				Path:        name,
+			}
 
-		resp, err := client.AssignVolume(ctx, request)
-		if err != nil {
-			glog.V(0).Infof("assign volume failure %v: %v", request, err)
-			return err
-		}
-		if resp.Error != "" {
-			return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
-		}
+			resp, err := client.AssignVolume(ctx, request)
+			if err != nil {
+				glog.V(0).Infof("assign volume failure %v: %v", request, err)
+				return err
+			}
+			if resp.Error != "" {
+				return fmt.Errorf("assign volume failure %v: %v", request, resp.Error)
+			}
 
-		fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
-		f.collection, f.replication = resp.Collection, resp.Replication
+			fileId, host, auth = resp.FileId, resp.Url, security.EncodedJwt(resp.Auth)
+			f.collection, f.replication = resp.Collection, resp.Replication
+
+			return nil
+		})
+		if assignErr != nil {
+			return assignErr
+		}
 
 		return nil
 	}); flushErr != nil {
diff --git a/weed/shell/command_ec_encode.go b/weed/shell/command_ec_encode.go
index 634cb11e2..8480bab06 100644
--- a/weed/shell/command_ec_encode.go
+++ b/weed/shell/command_ec_encode.go
@@ -63,6 +63,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
 	collection := encodeCommand.String("collection", "", "the collection name")
 	fullPercentage := encodeCommand.Float64("fullPercent", 95, "the volume reaches the percentage of max volume size")
 	quietPeriod := encodeCommand.Duration("quietFor", time.Hour, "select volumes without no writes for this period")
+	parallelCopy := encodeCommand.Bool("parallelCopy", true, "copy shards in parallel")
 	if err = encodeCommand.Parse(args); err != nil {
 		return nil
 	}
@@ -71,7 +72,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
 
 	// volumeId is provided
 	if vid != 0 {
-		return doEcEncode(commandEnv, *collection, vid)
+		return doEcEncode(commandEnv, *collection, vid, *parallelCopy)
 	}
 
 	// apply to all volumes in the collection
@@ -81,7 +82,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
 	}
 	fmt.Printf("ec encode volumes: %v\n", volumeIds)
 	for _, vid := range volumeIds {
-		if err = doEcEncode(commandEnv, *collection, vid); err != nil {
+		if err = doEcEncode(commandEnv, *collection, vid, *parallelCopy); err != nil {
 			return err
 		}
 	}
@@ -89,7 +90,7 @@ func (c *commandEcEncode) Do(args []string, commandEnv *CommandEnv, writer io.Wr
 	return nil
 }
 
-func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId) (err error) {
+func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId, parallelCopy bool) (err error) {
 	// find volume location
 	locations, found := commandEnv.MasterClient.GetLocations(uint32(vid))
 	if !found {
@@ -111,7 +112,7 @@ func doEcEncode(commandEnv *CommandEnv, collection string, vid needle.VolumeId)
 	}
 
 	// balance the ec shards to current cluster
-	err = spreadEcShards(commandEnv, vid, collection, locations)
+	err = spreadEcShards(commandEnv, vid, collection, locations, parallelCopy)
 	if err != nil {
 		return fmt.Errorf("spread ec shards for volume %d from %s: %v", vid, locations[0].Url, err)
 	}
@@ -157,7 +158,7 @@ func generateEcShards(grpcDialOption grpc.DialOption, volumeId needle.VolumeId,
 
 }
 
-func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location) (err error) {
+func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection string, existingLocations []wdclient.Location, parallelCopy bool) (err error) {
 
 	allEcNodes, totalFreeEcSlots, err := collectEcNodes(commandEnv, "")
 	if err != nil {
@@ -176,7 +177,7 @@ func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection
 	allocatedEcIds := balancedEcDistribution(allocatedDataNodes)
 
 	// ask the data nodes to copy from the source volume server
-	copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0])
+	copiedShardIds, err := parallelCopyEcShardsFromSource(commandEnv.option.GrpcDialOption, allocatedDataNodes, allocatedEcIds, volumeId, collection, existingLocations[0], parallelCopy)
 	if err != nil {
 		return err
 	}
@@ -206,30 +207,36 @@ func spreadEcShards(commandEnv *CommandEnv, volumeId needle.VolumeId, collection
 
 }
 
-func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location) (actuallyCopied []uint32, err error) {
+func parallelCopyEcShardsFromSource(grpcDialOption grpc.DialOption, targetServers []*EcNode, allocatedEcIds [][]uint32, volumeId needle.VolumeId, collection string, existingLocation wdclient.Location, parallelCopy bool) (actuallyCopied []uint32, err error) {
 
 	fmt.Printf("parallelCopyEcShardsFromSource %d %s\n", volumeId, existingLocation.Url)
 
-	// parallelize
-	shardIdChan := make(chan []uint32, len(targetServers))
 	var wg sync.WaitGroup
+	shardIdChan := make(chan []uint32, len(targetServers))
+	copyFunc := func(server *EcNode, allocatedEcShardIds []uint32) {
+		defer wg.Done()
+		copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server,
+			allocatedEcShardIds, volumeId, collection, existingLocation.Url)
+		if copyErr != nil {
+			err = copyErr
+		} else {
+			shardIdChan <- copiedShardIds
+			server.addEcVolumeShards(volumeId, collection, copiedShardIds)
+		}
+	}
+
+	// run the copies in parallel or sequentially, depending on the -parallelCopy flag
 	for i, server := range targetServers {
 		if len(allocatedEcIds[i]) <= 0 {
 			continue
 		}
 
 		wg.Add(1)
-		go func(server *EcNode, allocatedEcShardIds []uint32) {
-			defer wg.Done()
-			copiedShardIds, copyErr := oneServerCopyAndMountEcShardsFromSource(grpcDialOption, server,
-				allocatedEcShardIds, volumeId, collection, existingLocation.Url)
-			if copyErr != nil {
-				err = copyErr
-			} else {
-				shardIdChan <- copiedShardIds
-				server.addEcVolumeShards(volumeId, collection, copiedShardIds)
-			}
-		}(server, allocatedEcIds[i])
+		if parallelCopy {
+			go copyFunc(server, allocatedEcIds[i])
+		} else {
+			copyFunc(server, allocatedEcIds[i])
+		}
 	}
 	wg.Wait()
 	close(shardIdChan)
diff --git a/weed/shell/command_fs_cat.go b/weed/shell/command_fs_cat.go
index df43d93dc..3c5e13663 100644
--- a/weed/shell/command_fs_cat.go
+++ b/weed/shell/command_fs_cat.go
@@ -52,7 +52,7 @@ func (c *commandFsCat) Do(args []string, commandEnv *CommandEnv, writer io.Write
 			return err
 		}
 
-		return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64, false)
+		return filer.StreamContent(commandEnv.MasterClient, writer, respLookupEntry.Entry.Chunks, 0, math.MaxInt64)
 
 	})
 
diff --git a/weed/shell/command_fs_lock_unlock.go b/weed/shell/command_fs_lock_unlock.go
index 8a6e8f71b..33458bb6f 100644
--- a/weed/shell/command_fs_lock_unlock.go
+++ b/weed/shell/command_fs_lock_unlock.go
@@ -1,6 +1,7 @@
 package shell
 
 import (
+	"github.com/chrislusf/seaweedfs/weed/util"
 	"io"
 )
 
@@ -26,7 +27,7 @@ func (c *commandLock) Help() string {
 
 func (c *commandLock) Do(args []string, commandEnv *CommandEnv, writer io.Writer) (err error) {
 
-	commandEnv.locker.RequestLock()
+	commandEnv.locker.RequestLock(util.DetectedHostAddress())
 
 	return nil
 }
diff --git a/weed/shell/command_fs_meta_load.go b/weed/shell/command_fs_meta_load.go
index 69ae9454c..46dc07e9a 100644
--- a/weed/shell/command_fs_meta_load.go
+++ b/weed/shell/command_fs_meta_load.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"strings"
 
 	"github.com/golang/protobuf/proto"
 
@@ -72,6 +73,7 @@ func (c *commandFsMetaLoad) Do(args []string, commandEnv *CommandEnv, writer io.
 				return err
 			}
 
+			fullEntry.Entry.Name = strings.ReplaceAll(fullEntry.Entry.Name, "/", "x")
 			if err := filer_pb.CreateEntry(client, &filer_pb.CreateEntryRequest{
 				Directory: fullEntry.Dir,
 				Entry:     fullEntry.Entry,
diff --git a/weed/shell/command_fs_meta_save.go b/weed/shell/command_fs_meta_save.go
index ed19e3d01..37d94fe42 100644
--- a/weed/shell/command_fs_meta_save.go
+++ b/weed/shell/command_fs_meta_save.go
@@ -5,6 +5,8 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"path/filepath"
+	"strings"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -46,6 +48,7 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
 	fsMetaSaveCommand := flag.NewFlagSet(c.Name(), flag.ContinueOnError)
 	verbose := fsMetaSaveCommand.Bool("v", false, "print out each processed files")
 	outputFileName := fsMetaSaveCommand.String("o", "", "output the meta data to this file")
+	isObfuscate := fsMetaSaveCommand.Bool("obfuscate", false, "obfuscate the file names")
 	// chunksFileName := fsMetaSaveCommand.String("chunks", "", "output all the chunks to this file")
 	if err = fsMetaSaveCommand.Parse(args); err != nil {
 		return nil
@@ -69,6 +72,11 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
 	}
 	defer dst.Close()
 
+	var cipherKey util.CipherKey
+	if *isObfuscate {
+		cipherKey = util.GenCipherKey()
+	}
+
 	err = doTraverseBfsAndSaving(commandEnv, writer, path, *verbose, func(outputChan chan interface{}) {
 		sizeBuf := make([]byte, 4)
 		for item := range outputChan {
@@ -78,6 +86,13 @@ func (c *commandFsMetaSave) Do(args []string, commandEnv *CommandEnv, writer io.
 			dst.Write(b)
 		}
 	}, func(entry *filer_pb.FullEntry, outputChan chan interface{}) (err error) {
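+		// when -obfuscate generated a cipher key, each file name is replaced by an encrypted, base64-encoded stand-in (extension preserved)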
+		if !entry.Entry.IsDirectory {
+			ext := filepath.Ext(entry.Entry.Name)
+			if encrypted, encErr := util.Encrypt([]byte(entry.Entry.Name), cipherKey); encErr == nil {
+				entry.Entry.Name = util.Base64Encode(encrypted)[:len(entry.Entry.Name)] + ext
+				entry.Entry.Name = strings.ReplaceAll(entry.Entry.Name, "/", "x")
+			}
+		}
 		bytes, err := proto.Marshal(entry)
 		if err != nil {
 			fmt.Fprintf(writer, "marshall error: %v\n", err)
diff --git a/weed/shell/command_s3_clean_uploads.go b/weed/shell/command_s3_clean_uploads.go
index 5f674d7b6..1ba31292c 100644
--- a/weed/shell/command_s3_clean_uploads.go
+++ b/weed/shell/command_s3_clean_uploads.go
@@ -26,7 +26,7 @@ func (c *commandS3CleanUploads) Help() string {
 	return `clean up stale multipart uploads
 
 	Example:
-		s3.clean.uploads -replication 001
+		s3.clean.uploads -timeAgo 1.5h
 
 `
 }
diff --git a/weed/shell/command_volume_check_disk.go b/weed/shell/command_volume_check_disk.go
index 5a0d46869..0f156ac2f 100644
--- a/weed/shell/command_volume_check_disk.go
+++ b/weed/shell/command_volume_check_disk.go
@@ -85,6 +85,7 @@ func (c *commandVolumeCheckDisk) Do(args []string, commandEnv *CommandEnv, write
 			}
 			if a.info.ReadOnly || b.info.ReadOnly {
 				fmt.Fprintf(writer, "skipping readonly volume %d on %s and %s\n", a.info.Id, a.location.dataNode.Id, b.location.dataNode.Id)
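+				// advance past this replica, otherwise the loop would keep re-checking the same read-only volume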
+				replicas = replicas[1:]
 				continue
 			}
 
diff --git a/weed/shell/command_volume_server_evacuate.go b/weed/shell/command_volume_server_evacuate.go
index 80c5b1d6b..f21d0334c 100644
--- a/weed/shell/command_volume_server_evacuate.go
+++ b/weed/shell/command_volume_server_evacuate.go
@@ -176,6 +176,11 @@ func moveAwayOneEcVolume(commandEnv *CommandEnv, ecShardInfo *master_pb.VolumeEc
 
 func moveAwayOneNormalVolume(commandEnv *CommandEnv, volumeReplicas map[uint32][]*VolumeReplica, vol *master_pb.VolumeInformationMessage, thisNode *Node, otherNodes []*Node, applyChange bool) (hasMoved bool, err error) {
 	fn := capacityByFreeVolumeCount(types.ToDiskType(vol.DiskType))
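+	// only count volumes of the matching disk type when ranking the other nodes by free capacity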
+	for _, n := range otherNodes {
+		n.selectVolumes(func(v *master_pb.VolumeInformationMessage) bool {
+			return v.DiskType == vol.DiskType
+		})
+	}
 	sort.Slice(otherNodes, func(i, j int) bool {
 		return otherNodes[i].localVolumeRatio(fn) > otherNodes[j].localVolumeRatio(fn)
 	})
diff --git a/weed/shell/command_volume_tier_move.go b/weed/shell/command_volume_tier_move.go
index f7fa94031..d6a49d6e1 100644
--- a/weed/shell/command_volume_tier_move.go
+++ b/weed/shell/command_volume_tier_move.go
@@ -133,7 +133,7 @@ func doVolumeTierMove(commandEnv *CommandEnv, writer io.Writer, collection strin
 
 			// remove the remaining replicas
 			for _, loc := range locations {
-				if loc.Url != sourceVolumeServer {
+				if loc.Url != dst.dataNode.Id {
 					if err = deleteVolume(commandEnv.option.GrpcDialOption, vid, loc.Url); err != nil {
 						fmt.Fprintf(writer, "failed to delete volume %d on %s\n", vid, loc.Url)
 					}
diff --git a/weed/shell/shell_liner.go b/weed/shell/shell_liner.go
index d79f67032..1dd611ca5 100644
--- a/weed/shell/shell_liner.go
+++ b/weed/shell/shell_liner.go
@@ -2,6 +2,7 @@ package shell
 
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/util/grace"
 	"io"
 	"os"
 	"path"
@@ -25,6 +26,9 @@ func RunShell(options ShellOptions) {
 
 	line = liner.NewLiner()
 	defer line.Close()
+	grace.OnInterrupt(func() {
+		line.Close()
+	})
 
 	line.SetCtrlCAborts(true)
 
diff --git a/weed/storage/disk_location.go b/weed/storage/disk_location.go
index 6de87c793..33dd272ce 100644
--- a/weed/storage/disk_location.go
+++ b/weed/storage/disk_location.go
@@ -23,7 +23,7 @@ type DiskLocation struct {
 	DiskType               types.DiskType
 	MaxVolumeCount         int
 	OriginalMaxVolumeCount int
-	MinFreeSpacePercent    float32
+	MinFreeSpace           util.MinFreeSpace
 	volumes                map[needle.VolumeId]*Volume
 	volumesLock            sync.RWMutex
 
@@ -34,7 +34,7 @@ type DiskLocation struct {
 	isDiskSpaceLow bool
 }
 
-func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpacePercent float32, idxDir string, diskType types.DiskType) *DiskLocation {
+func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpace util.MinFreeSpace, idxDir string, diskType types.DiskType) *DiskLocation {
 	dir = util.ResolvePath(dir)
 	if idxDir == "" {
 		idxDir = dir
@@ -47,7 +47,7 @@ func NewDiskLocation(dir string, maxVolumeCount int, minFreeSpacePercent float32
 		DiskType:               diskType,
 		MaxVolumeCount:         maxVolumeCount,
 		OriginalMaxVolumeCount: maxVolumeCount,
-		MinFreeSpacePercent:    minFreeSpacePercent,
+		MinFreeSpace:           minFreeSpace,
 	}
 	location.volumes = make(map[needle.VolumeId]*Volume)
 	location.ecVolumes = make(map[needle.VolumeId]*erasure_coding.EcVolume)
@@ -131,7 +131,7 @@ func (l *DiskLocation) loadExistingVolume(fileInfo os.FileInfo, needleMapKind Ne
 	l.SetVolume(vid, v)
 
 	size, _, _ := v.FileStat()
-	glog.V(0).Infof("data file %s, replicaPlacement=%s v=%d size=%d ttl=%s",
+	glog.V(0).Infof("data file %s, replication=%s v=%d size=%d ttl=%s",
 		l.Directory+"/"+volumeName+".dat", v.ReplicaPlacement, v.Version(), size, v.Ttl.String())
 	return true
 }
@@ -361,14 +361,18 @@ func (l *DiskLocation) CheckDiskSpace() {
 			stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "all").Set(float64(s.All))
 			stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "used").Set(float64(s.Used))
 			stats.VolumeServerResourceGauge.WithLabelValues(l.Directory, "free").Set(float64(s.Free))
-			if (s.PercentFree < l.MinFreeSpacePercent) != l.isDiskSpaceLow {
+
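+			// MinFreeSpace.IsLow applies either the percentage or the absolute-bytes threshold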
+			isLow, desc := l.MinFreeSpace.IsLow(s.Free, s.PercentFree)
+			if isLow != l.isDiskSpaceLow {
 				l.isDiskSpaceLow = !l.isDiskSpaceLow
 			}
+
+			logLevel := glog.Level(4)
 			if l.isDiskSpaceLow {
-				glog.V(0).Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
-			} else {
-				glog.V(4).Infof("dir %s freePercent %.2f%% < min %.2f%%, isLowDiskSpace: %v", dir, s.PercentFree, l.MinFreeSpacePercent, l.isDiskSpaceLow)
+				logLevel = glog.Level(0)
 			}
+
+			glog.V(logLevel).Infof("dir %s %s", dir, desc)
 		}
 		time.Sleep(time.Minute)
 	}
diff --git a/weed/storage/store.go b/weed/storage/store.go
index c3507c0e2..f27f2412f 100644
--- a/weed/storage/store.go
+++ b/weed/storage/store.go
@@ -2,6 +2,7 @@ package storage
 
 import (
 	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/util"
 	"path/filepath"
 	"strings"
 	"sync/atomic"
@@ -52,11 +53,12 @@ func (s *Store) String() (str string) {
 	return
 }
 
-func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int, minFreeSpacePercents []float32, idxFolder string, needleMapKind NeedleMapKind, diskTypes []DiskType) (s *Store) {
+func NewStore(grpcDialOption grpc.DialOption, port int, ip, publicUrl string, dirnames []string, maxVolumeCounts []int,
+	minFreeSpaces []util.MinFreeSpace, idxFolder string, needleMapKind NeedleMapKind, diskTypes []DiskType) (s *Store) {
 	s = &Store{grpcDialOption: grpcDialOption, Port: port, Ip: ip, PublicUrl: publicUrl, NeedleMapKind: needleMapKind}
 	s.Locations = make([]*DiskLocation, 0)
 	for i := 0; i < len(dirnames); i++ {
-		location := NewDiskLocation(dirnames[i], maxVolumeCounts[i], minFreeSpacePercents[i], idxFolder, diskTypes[i])
+		location := NewDiskLocation(dirnames[i], maxVolumeCounts[i], minFreeSpaces[i], idxFolder, diskTypes[i])
 		location.loadExistingVolumes(needleMapKind)
 		s.Locations = append(s.Locations, location)
 		stats.VolumeServerMaxVolumeCounter.Add(float64(maxVolumeCounts[i]))
@@ -106,6 +108,9 @@ func (s *Store) FindFreeLocation(diskType DiskType) (ret *DiskLocation) {
 		if diskType != location.DiskType {
 			continue
 		}
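+		// skip locations that are already flagged as low on free disk space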
+		if location.isDiskSpaceLow {
+			continue
+		}
 		currentFreeCount := location.MaxVolumeCount - location.VolumesLen()
 		currentFreeCount *= erasure_coding.DataShardsCount
 		currentFreeCount -= location.EcVolumesLen()
diff --git a/weed/storage/store_ec.go b/weed/storage/store_ec.go
index a9b6a8ff3..9702fdd50 100644
--- a/weed/storage/store_ec.go
+++ b/weed/storage/store_ec.go
@@ -200,7 +200,6 @@ func (s *Store) readOneEcShardInterval(needleId types.NeedleId, ecVolume *erasur
 				return
 			}
 			glog.V(0).Infof("clearing ec shard %d.%d locations: %v", ecVolume.VolumeId, shardId, err)
-			forgetShardId(ecVolume, shardId)
 		}
 
 		// try reading by recovering from other shards
@@ -303,7 +302,7 @@ func (s *Store) doReadRemoteEcShardInterval(sourceDataNode string, needleId type
 				break
 			}
 			if receiveErr != nil {
-				return fmt.Errorf("receiving ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, err)
+				return fmt.Errorf("receiving ec shard %d.%d from %s: %v", vid, shardId, sourceDataNode, receiveErr)
 			}
 			if resp.IsDeleted {
 				is_deleted = true
diff --git a/weed/storage/store_vacuum.go b/weed/storage/store_vacuum.go
index 32666a417..fe2033070 100644
--- a/weed/storage/store_vacuum.go
+++ b/weed/storage/store_vacuum.go
@@ -25,11 +25,11 @@ func (s *Store) CompactVolume(vid needle.VolumeId, preallocate int64, compaction
 	}
 	return fmt.Errorf("volume id %d is not found during compact", vid)
 }
-func (s *Store) CommitCompactVolume(vid needle.VolumeId) error {
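+// CommitCompactVolume commits the compaction and also reports whether the volume is read-only.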
+func (s *Store) CommitCompactVolume(vid needle.VolumeId) (bool, error) {
 	if v := s.findVolume(vid); v != nil {
-		return v.CommitCompact()
+		return v.IsReadOnly(), v.CommitCompact()
 	}
-	return fmt.Errorf("volume id %d is not found during commit compact", vid)
+	return false, fmt.Errorf("volume id %d is not found during commit compact", vid)
 }
 func (s *Store) CommitCleanupVolume(vid needle.VolumeId) error {
 	if v := s.findVolume(vid); v != nil {
diff --git a/weed/topology/allocate_volume.go b/weed/topology/allocate_volume.go
index 39c24ab04..7c7fae683 100644
--- a/weed/topology/allocate_volume.go
+++ b/weed/topology/allocate_volume.go
@@ -22,7 +22,7 @@ func AllocateVolume(dn *DataNode, grpcDialOption grpc.DialOption, vid needle.Vol
 			Collection:         option.Collection,
 			Replication:        option.ReplicaPlacement.String(),
 			Ttl:                option.Ttl.String(),
-			Preallocate:        option.Prealloacte,
+			Preallocate:        option.Preallocate,
 			MemoryMapMaxSizeMb: option.MemoryMapMaxSizeMb,
 			DiskType:           string(option.DiskType),
 		})
diff --git a/weed/topology/node.go b/weed/topology/node.go
index 95d63972e..a23729dd3 100644
--- a/weed/topology/node.go
+++ b/weed/topology/node.go
@@ -25,7 +25,7 @@ type Node interface {
 	SetParent(Node)
 	LinkChildNode(node Node)
 	UnlinkChildNode(nodeId NodeId)
-	CollectDeadNodeAndFullVolumes(freshThreshHold int64, volumeSizeLimit uint64)
+	CollectDeadNodeAndFullVolumes(freshThreshHold int64, volumeSizeLimit uint64, growThreshold float64)
 
 	IsDataNode() bool
 	IsRack() bool
@@ -235,20 +235,22 @@ func (n *NodeImpl) UnlinkChildNode(nodeId NodeId) {
 	}
 }
 
-func (n *NodeImpl) CollectDeadNodeAndFullVolumes(freshThreshHold int64, volumeSizeLimit uint64) {
+func (n *NodeImpl) CollectDeadNodeAndFullVolumes(freshThreshHold int64, volumeSizeLimit uint64, growThreshold float64) {
 	if n.IsRack() {
 		for _, c := range n.Children() {
 			dn := c.(*DataNode) //can not cast n to DataNode
 			for _, v := range dn.GetVolumes() {
-				if uint64(v.Size) >= volumeSizeLimit {
+				if v.Size >= volumeSizeLimit {
 					//fmt.Println("volume",v.Id,"size",v.Size,">",volumeSizeLimit)
-					n.GetTopology().chanFullVolumes <- v
+					n.GetTopology().chanFullVolumes <- &v
+				} else if float64(v.Size) > float64(volumeSizeLimit)*growThreshold {
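+					// not full yet, but already above growThreshold of the size limit: report it as crowded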
+					n.GetTopology().chanCrowdedVolumes <- &v
 				}
 			}
 		}
 	} else {
 		for _, c := range n.Children() {
-			c.CollectDeadNodeAndFullVolumes(freshThreshHold, volumeSizeLimit)
+			c.CollectDeadNodeAndFullVolumes(freshThreshHold, volumeSizeLimit, growThreshold)
 		}
 	}
 }
diff --git a/weed/topology/topology.go b/weed/topology/topology.go
index 08ebd24fd..3932e3fbb 100644
--- a/weed/topology/topology.go
+++ b/weed/topology/topology.go
@@ -34,9 +34,10 @@ type Topology struct {
 
 	Sequence sequence.Sequencer
 
-	chanFullVolumes chan storage.VolumeInfo
+	chanFullVolumes    chan *storage.VolumeInfo
+	chanCrowdedVolumes chan *storage.VolumeInfo
 
-	Configuration *Configuration
+	Configuration      *Configuration
 
 	RaftServer raft.Server
 }
@@ -56,7 +57,8 @@ func NewTopology(id string, seq sequence.Sequencer, volumeSizeLimit uint64, puls
 
 	t.Sequence = seq
 
-	t.chanFullVolumes = make(chan storage.VolumeInfo)
+	t.chanFullVolumes = make(chan *storage.VolumeInfo)
+	t.chanCrowdedVolumes = make(chan *storage.VolumeInfo)
 
 	t.Configuration = &Configuration{}
 
@@ -122,9 +124,11 @@ func (t *Topology) NextVolumeId() (needle.VolumeId, error) {
 	return next, nil
 }
 
+// deprecated
 func (t *Topology) HasWritableVolume(option *VolumeGrowOption) bool {
 	vl := t.GetVolumeLayout(option.Collection, option.ReplicaPlacement, option.Ttl, option.DiskType)
-	return vl.GetActiveVolumeCount(option) > 0
+	active, _ := vl.GetActiveVolumeCount(option)
+	return active > 0
 }
 
 func (t *Topology) PickForWrite(count uint64, option *VolumeGrowOption) (string, uint64, *DataNode, error) {
diff --git a/weed/topology/topology_event_handling.go b/weed/topology/topology_event_handling.go
index 543dacf29..e4eb430fe 100644
--- a/weed/topology/topology_event_handling.go
+++ b/weed/topology/topology_event_handling.go
@@ -10,12 +10,12 @@ import (
 	"github.com/chrislusf/seaweedfs/weed/storage"
 )
 
-func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, garbageThreshold float64, preallocate int64) {
+func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, garbageThreshold float64, growThreshold float64, preallocate int64) {
 	go func() {
 		for {
 			if t.IsLeader() {
 				freshThreshHold := time.Now().Unix() - 3*t.pulse //3 times of sleep interval
-				t.CollectDeadNodeAndFullVolumes(freshThreshHold, t.volumeSizeLimit)
+				t.CollectDeadNodeAndFullVolumes(freshThreshHold, t.volumeSizeLimit, growThreshold)
 			}
 			time.Sleep(time.Duration(float32(t.pulse*1e3)*(1+rand.Float32())) * time.Millisecond)
 		}
@@ -31,13 +31,15 @@ func (t *Topology) StartRefreshWritableVolumes(grpcDialOption grpc.DialOption, g
 	go func() {
 		for {
 			select {
-			case v := <-t.chanFullVolumes:
-				t.SetVolumeCapacityFull(v)
+			case fv := <-t.chanFullVolumes:
+				t.SetVolumeCapacityFull(fv)
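+			// crowded volumes remain writable; the volume layout only records them (see VolumeLayout.SetVolumeCrowded)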
+			case cv := <-t.chanCrowdedVolumes:
+				t.SetVolumeCrowded(cv)
 			}
 		}
 	}()
 }
-func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
+func (t *Topology) SetVolumeCapacityFull(volumeInfo *storage.VolumeInfo) bool {
 	diskType := types.ToDiskType(volumeInfo.DiskType)
 	vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, diskType)
 	if !vl.SetVolumeCapacityFull(volumeInfo.Id) {
@@ -60,6 +62,13 @@ func (t *Topology) SetVolumeCapacityFull(volumeInfo storage.VolumeInfo) bool {
 	}
 	return true
 }
+
+func (t *Topology) SetVolumeCrowded(volumeInfo *storage.VolumeInfo) {
+	diskType := types.ToDiskType(volumeInfo.DiskType)
+	vl := t.GetVolumeLayout(volumeInfo.Collection, volumeInfo.ReplicaPlacement, volumeInfo.Ttl, diskType)
+	vl.SetVolumeCrowded(volumeInfo.Id)
+}
+
 func (t *Topology) UnRegisterDataNode(dn *DataNode) {
 	for _, v := range dn.GetVolumes() {
 		glog.V(0).Infoln("Removing Volume", v.Id, "from the dead volume server", dn.Id())
diff --git a/weed/topology/volume_growth.go b/weed/topology/volume_growth.go
index 8941a049b..ae0b11c81 100644
--- a/weed/topology/volume_growth.go
+++ b/weed/topology/volume_growth.go
@@ -3,18 +3,17 @@ package topology
 import (
 	"encoding/json"
 	"fmt"
-	"github.com/chrislusf/seaweedfs/weed/storage/types"
 	"math/rand"
 	"sync"
 
-	"github.com/chrislusf/seaweedfs/weed/storage/needle"
-	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
-	"github.com/chrislusf/seaweedfs/weed/util"
-
 	"google.golang.org/grpc"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/storage"
+	"github.com/chrislusf/seaweedfs/weed/storage/needle"
+	"github.com/chrislusf/seaweedfs/weed/storage/super_block"
+	"github.com/chrislusf/seaweedfs/weed/storage/types"
+	"github.com/chrislusf/seaweedfs/weed/util"
 )
 
 /*
@@ -25,12 +24,18 @@ This package is created to resolve these replica placement issues:
 4. volume allocation for each bucket
 */
 
+type VolumeGrowRequest struct {
+	Option *VolumeGrowOption
+	Count  int
+	ErrCh  chan error
+}
+
 type VolumeGrowOption struct {
 	Collection         string                        `json:"collection,omitempty"`
 	ReplicaPlacement   *super_block.ReplicaPlacement `json:"replication,omitempty"`
 	Ttl                *needle.TTL                   `json:"ttl,omitempty"`
 	DiskType           types.DiskType                `json:"disk,omitempty"`
-	Prealloacte        int64                         `json:"prealloacte,omitempty"`
+	Preallocate        int64                         `json:"preallocate,omitempty"`
 	DataCenter         string                        `json:"dataCenter,omitempty"`
 	Rack               string                        `json:"rack,omitempty"`
 	DataNode           string                        `json:"dataNode,omitempty"`
@@ -46,6 +51,11 @@ func (o *VolumeGrowOption) String() string {
 	return string(blob)
 }
 
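+// Threshold returns the crowded-volume ratio, read from the master.volume_growth.threshold setting.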
+func (o *VolumeGrowOption) Threshold() float64 {
+	v := util.GetViper()
+	return v.GetFloat64("master.volume_growth.threshold")
+}
+
 func NewDefaultVolumeGrowth() *VolumeGrowth {
 	return &VolumeGrowth{}
 }
@@ -54,10 +64,6 @@ func NewDefaultVolumeGrowth() *VolumeGrowth {
 // given copyCount, how many logical volumes to create
 func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
 	v := util.GetViper()
-	v.SetDefault("master.volume_growth.copy_1", 7)
-	v.SetDefault("master.volume_growth.copy_2", 6)
-	v.SetDefault("master.volume_growth.copy_3", 3)
-	v.SetDefault("master.volume_growth.copy_other", 1)
 	switch copyCount {
 	case 1:
 		count = v.GetInt("master.volume_growth.copy_1")
diff --git a/weed/topology/volume_layout.go b/weed/topology/volume_layout.go
index c7e171248..57e511fa0 100644
--- a/weed/topology/volume_layout.go
+++ b/weed/topology/volume_layout.go
@@ -27,6 +27,7 @@ type volumeState string
 const (
 	readOnlyState  volumeState = "ReadOnly"
 	oversizedState             = "Oversized"
+	crowdedState               = "Crowded"
 )
 
 type stateIndicator func(copyState) bool
@@ -106,7 +107,8 @@ type VolumeLayout struct {
 	ttl              *needle.TTL
 	diskType         types.DiskType
 	vid2location     map[needle.VolumeId]*VolumeLocationList
-	writables        []needle.VolumeId   // transient array of writable volume id
+	writables        []needle.VolumeId // transient array of writable volume id
+	crowded          map[needle.VolumeId]interface{}
 	readonlyVolumes  *volumesBinaryState // readonly volumes
 	oversizedVolumes *volumesBinaryState // oversized volumes
 	volumeSizeLimit  uint64
@@ -127,6 +129,7 @@ func NewVolumeLayout(rp *super_block.ReplicaPlacement, ttl *needle.TTL, diskType
 		diskType:         diskType,
 		vid2location:     make(map[needle.VolumeId]*VolumeLocationList),
 		writables:        *new([]needle.VolumeId),
+		crowded:          make(map[needle.VolumeId]interface{}),
 		readonlyVolumes:  NewVolumesBinaryState(readOnlyState, rp, ExistCopies()),
 		oversizedVolumes: NewVolumesBinaryState(oversizedState, rp, ExistCopies()),
 		volumeSizeLimit:  volumeSizeLimit,
@@ -273,7 +276,7 @@ func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*n
 
 	lenWriters := len(vl.writables)
 	if lenWriters <= 0 {
-		glog.V(0).Infoln("No more writable volumes!")
+		//glog.V(0).Infoln("No more writable volumes!")
 		return nil, 0, nil, errors.New("No more writable volumes!")
 	}
 	if option.DataCenter == "" {
@@ -307,14 +310,13 @@ func (vl *VolumeLayout) PickForWrite(count uint64, option *VolumeGrowOption) (*n
 	return &vid, count, locationList, nil
 }
 
-func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) int {
+func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) (active, crowded int) {
 	vl.accessLock.RLock()
 	defer vl.accessLock.RUnlock()
 
 	if option.DataCenter == "" {
-		return len(vl.writables)
+		return len(vl.writables), len(vl.crowded)
 	}
-	counter := 0
 	for _, v := range vl.writables {
 		for _, dn := range vl.vid2location[v].list {
 			if dn.GetDataCenter().Id() == NodeId(option.DataCenter) {
@@ -324,11 +326,15 @@ func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) int {
 				if option.DataNode != "" && dn.Id() != NodeId(option.DataNode) {
 					continue
 				}
-				counter++
+				active++
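+				// a writable volume larger than threshold*limit also counts as crowded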
+				info, _ := dn.GetVolumesById(v)
+				if float64(info.Size) > float64(vl.volumeSizeLimit)*option.Threshold() {
+					crowded++
+				}
 			}
 		}
 	}
-	return counter
+	return
 }
 
 func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
@@ -342,6 +348,7 @@ func (vl *VolumeLayout) removeFromWritable(vid needle.VolumeId) bool {
 	if toDeleteIndex >= 0 {
 		glog.V(0).Infoln("Volume", vid, "becomes unwritable")
 		vl.writables = append(vl.writables[0:toDeleteIndex], vl.writables[toDeleteIndex+1:]...)
+		vl.removeFromCrowded(vid)
 		return true
 	}
 	return false
@@ -408,6 +415,32 @@ func (vl *VolumeLayout) SetVolumeCapacityFull(vid needle.VolumeId) bool {
 	return vl.removeFromWritable(vid)
 }
 
+func (vl *VolumeLayout) removeFromCrowded(vid needle.VolumeId) {
+	delete(vl.crowded, vid)
+}
+
+func (vl *VolumeLayout) setVolumeCrowded(vid needle.VolumeId) {
+	if _, ok := vl.crowded[vid]; !ok {
+		vl.crowded[vid] = nil
+		glog.V(0).Infoln("Volume", vid, "becomes crowded")
+	}
+}
+
+func (vl *VolumeLayout) SetVolumeCrowded(vid needle.VolumeId) {
+	// since delete is guarded by accessLock.Lock(),
+	// and is always called in sequential order,
+	// RLock() should be safe enough
+	vl.accessLock.RLock()
+	defer vl.accessLock.RUnlock()
+
+	for _, v := range vl.writables {
+		if v == vid {
+			vl.setVolumeCrowded(vid)
+			break
+		}
+	}
+}
+
 func (vl *VolumeLayout) ToMap() map[string]interface{} {
 	m := make(map[string]interface{})
 	m["replication"] = vl.rp.String()
diff --git a/weed/util/bounded_tree/bounded_tree.go b/weed/util/bounded_tree/bounded_tree.go
index 3a8a22a9c..137f690b8 100644
--- a/weed/util/bounded_tree/bounded_tree.go
+++ b/weed/util/bounded_tree/bounded_tree.go
@@ -41,9 +41,6 @@ func (t *BoundedTree) EnsureVisited(p util.FullPath, visitFn VisitNodeFunc) (vis
 	if t.root == nil {
 		return
 	}
-	if t.baseDir != "/" {
-		p = p[len(t.baseDir):]
-	}
 	components := p.Split()
 	// fmt.Printf("components %v %d\n", components, len(components))
 	canDelete, err := t.ensureVisited(t.root, t.baseDir, components, 0, visitFn)
diff --git a/weed/util/bytes.go b/weed/util/bytes.go
index c2a4df108..26da91033 100644
--- a/weed/util/bytes.go
+++ b/weed/util/bytes.go
@@ -7,6 +7,10 @@ import (
 	"encoding/base64"
 	"fmt"
 	"io"
+	"math"
+	"strconv"
+	"strings"
+	"unicode"
 )
 
 // BytesToHumanReadable returns the converted human readable representation of the bytes.
@@ -161,3 +165,94 @@ func NewBytesReader(b []byte) *BytesReader {
 		Reader: bytes.NewReader(b),
 	}
 }
+
+// EmptyTo returns to if s is empty.
+func EmptyTo(s, to string) string {
+	if s == "" {
+		return to
+	}
+
+	return s
+}
+
+// IfElse works like b ? this : that.
+func IfElse(b bool, this, that string) string {
+	if b {
+		return this
+	}
+	return that
+}
+
+// ParseBytes parses a string representation of bytes into the number
+// of bytes it represents.
+//
+// See Also: BytesToHumanReadable.
+//
+// ParseBytes("42MB") -> 42000000, nil
+// ParseBytes("42 MB") -> 42000000, nil
+// ParseBytes("42 mib") -> 44040192, nil
+func ParseBytes(s string) (uint64, error) {
+	lastDigit := 0
+	hasComma := false
+	for _, r := range s {
+		if !(unicode.IsDigit(r) || r == '.' || r == ',') {
+			break
+		}
+		if r == ',' {
+			hasComma = true
+		}
+		lastDigit++
+	}
+
+	num := s[:lastDigit]
+	if hasComma {
+		num = strings.Replace(num, ",", "", -1)
+	}
+
+	f, err := strconv.ParseFloat(num, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	extra := strings.ToLower(strings.TrimSpace(s[lastDigit:]))
+	if m, ok := bytesSizeTable[extra]; ok {
+		f *= float64(m)
+		if f >= math.MaxUint64 {
+			return 0, fmt.Errorf("too large: %v", s)
+		}
+		return uint64(f), nil
+	}
+
+	return 0, fmt.Errorf("unhandled size name: %v", extra)
+}
+
+var bytesSizeTable = map[string]uint64{
+	"b": Byte, "kib": KiByte, "kb": KByte, "mib": MiByte, "mb": MByte, "gib": GiByte, "gb": GByte,
+	"tib": TiByte, "tb": TByte, "pib": PiByte, "pb": PByte, "eib": EiByte, "eb": EByte,
+	// Without suffix
+	"": Byte, "ki": KiByte, "k": KByte, "mi": MiByte, "m": MByte, "gi": GiByte, "g": GByte,
+	"ti": TiByte, "t": TByte, "pi": PiByte, "p": PByte, "ei": EiByte, "e": EByte,
+}
+
+// IEC Sizes.
+// powers of 1024
+const (
+	Byte = 1 << (iota * 10)
+	KiByte
+	MiByte
+	GiByte
+	TiByte
+	PiByte
+	EiByte
+)
+
+// SI Sizes.
+const (
+	IByte = 1
+	KByte = IByte * 1000
+	MByte = KByte * 1000
+	GByte = MByte * 1000
+	TByte = GByte * 1000
+	PByte = TByte * 1000
+	EByte = PByte * 1000
+)
diff --git a/weed/util/bytes_test.go b/weed/util/bytes_test.go
new file mode 100644
index 000000000..d9269cadb
--- /dev/null
+++ b/weed/util/bytes_test.go
@@ -0,0 +1,59 @@
+package util
+
+import "testing"
+
+func TestByteParsing(t *testing.T) {
+	tests := []struct {
+		in  string
+		exp uint64
+	}{
+		{"42", 42},
+		{"42MB", 42000000},
+		{"42MiB", 44040192},
+		{"42mb", 42000000},
+		{"42mib", 44040192},
+		{"42MIB", 44040192},
+		{"42 MB", 42000000},
+		{"42 MiB", 44040192},
+		{"42 mb", 42000000},
+		{"42 mib", 44040192},
+		{"42 MIB", 44040192},
+		{"42.5MB", 42500000},
+		{"42.5MiB", 44564480},
+		{"42.5 MB", 42500000},
+		{"42.5 MiB", 44564480},
+		// No need to say B
+		{"42M", 42000000},
+		{"42Mi", 44040192},
+		{"42m", 42000000},
+		{"42mi", 44040192},
+		{"42MI", 44040192},
+		{"42 M", 42000000},
+		{"42 Mi", 44040192},
+		{"42 m", 42000000},
+		{"42 mi", 44040192},
+		{"42 MI", 44040192},
+		{"42.5M", 42500000},
+		{"42.5Mi", 44564480},
+		{"42.5 M", 42500000},
+		{"42.5 Mi", 44564480},
+		// Bug #42
+		{"1,005.03 MB", 1005030000},
+		// Large testing, breaks when too much larger than
+		// this.
+		{"12.5 EB", uint64(12.5 * float64(EByte))},
+		{"12.5 E", uint64(12.5 * float64(EByte))},
+		{"12.5 EiB", uint64(12.5 * float64(EiByte))},
+	}
+
+	for _, p := range tests {
+		got, err := ParseBytes(p.in)
+		if err != nil {
+			t.Errorf("Couldn't parse %v: %v", p.in, err)
+		}
+		if got != p.exp {
+			t.Errorf("Expected %v for %v, got %v",
+				p.exp, p.in, got)
+		}
+	}
+}
diff --git a/weed/util/chunk_cache/chunk_cache.go b/weed/util/chunk_cache/chunk_cache.go
index 3615aee0e..40d24b322 100644
--- a/weed/util/chunk_cache/chunk_cache.go
+++ b/weed/util/chunk_cache/chunk_cache.go
@@ -1,14 +1,18 @@
 package chunk_cache
 
 import (
+	"errors"
 	"sync"
 
 	"github.com/chrislusf/seaweedfs/weed/glog"
 	"github.com/chrislusf/seaweedfs/weed/storage/needle"
 )
 
+var ErrorOutOfBounds = errors.New("attempt to read out of bounds")
+
 type ChunkCache interface {
 	GetChunk(fileId string, minSize uint64) (data []byte)
+	GetChunkSlice(fileId string, offset, length uint64) []byte
 	SetChunk(fileId string, data []byte)
 }
 
@@ -22,6 +26,8 @@ type TieredChunkCache struct {
 	onDiskCacheSizeLimit2 uint64
 }
 
+var _ ChunkCache = &TieredChunkCache{}
+
 func NewTieredChunkCache(maxEntries int64, dir string, diskSizeInUnit int64, unitSize int64) *TieredChunkCache {
 
 	c := &TieredChunkCache{
@@ -87,6 +93,58 @@ func (c *TieredChunkCache) doGetChunk(fileId string, minSize uint64) (data []byt
 
 }
 
+func (c *TieredChunkCache) GetChunkSlice(fileId string, offset, length uint64) []byte {
+	if c == nil {
+		return nil
+	}
+
+	c.RLock()
+	defer c.RUnlock()
+
+	return c.doGetChunkSlice(fileId, offset, length)
+}
+
+func (c *TieredChunkCache) doGetChunkSlice(fileId string, offset, length uint64) (data []byte) {
+
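+	// try the tiers from smallest to largest: the in-memory cache first, then each on-disk cache layer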
+	minSize := offset + length
+	if minSize <= c.onDiskCacheSizeLimit0 {
+		data, err := c.memCache.getChunkSlice(fileId, offset, length)
+		if err != nil {
+			glog.Errorf("failed to read from memcache: %s", err)
+		}
+		if len(data) >= int(minSize) {
+			return data
+		}
+	}
+
+	fid, err := needle.ParseFileIdFromString(fileId)
+	if err != nil {
+		glog.Errorf("failed to parse file id %s", fileId)
+		return nil
+	}
+
+	if minSize <= c.onDiskCacheSizeLimit0 {
+		data = c.diskCaches[0].getChunkSlice(fid.Key, offset, length)
+		if len(data) >= int(minSize) {
+			return data
+		}
+	}
+	if minSize <= c.onDiskCacheSizeLimit1 {
+		data = c.diskCaches[1].getChunkSlice(fid.Key, offset, length)
+		if len(data) >= int(minSize) {
+			return data
+		}
+	}
+	{
+		data = c.diskCaches[2].getChunkSlice(fid.Key, offset, length)
+		if len(data) >= int(minSize) {
+			return data
+		}
+	}
+
+	return nil
+}
+
 func (c *TieredChunkCache) SetChunk(fileId string, data []byte) {
 	if c == nil {
 		return
@@ -131,3 +189,10 @@ func (c *TieredChunkCache) Shutdown() {
 		diskCache.shutdown()
 	}
 }
+
+func min(x, y int) int {
+	if x < y {
+		return x
+	}
+	return y
+}
diff --git a/weed/util/chunk_cache/chunk_cache_in_memory.go b/weed/util/chunk_cache/chunk_cache_in_memory.go
index 5f26b8c78..d725f8a16 100644
--- a/weed/util/chunk_cache/chunk_cache_in_memory.go
+++ b/weed/util/chunk_cache/chunk_cache_in_memory.go
@@ -31,6 +31,20 @@ func (c *ChunkCacheInMemory) GetChunk(fileId string) []byte {
 	return data
 }
 
+func (c *ChunkCacheInMemory) getChunkSlice(fileId string, offset, length uint64) ([]byte, error) {
+	item := c.cache.Get(fileId)
+	if item == nil {
+		return nil, nil
+	}
+	data := item.Value().([]byte)
+	item.Extend(time.Hour)
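+	// clamp to the cached data; a negative remainder means the requested offset is past the end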
+	wanted := min(int(length), len(data)-int(offset))
+	if wanted < 0 {
+		return nil, ErrorOutOfBounds
+	}
+	return data[offset : int(offset)+wanted], nil
+}
+
 func (c *ChunkCacheInMemory) SetChunk(fileId string, data []byte) {
 	localCopy := make([]byte, len(data))
 	copy(localCopy, data)
diff --git a/weed/util/chunk_cache/chunk_cache_on_disk.go b/weed/util/chunk_cache/chunk_cache_on_disk.go
index d724e925e..36de5c972 100644
--- a/weed/util/chunk_cache/chunk_cache_on_disk.go
+++ b/weed/util/chunk_cache/chunk_cache_on_disk.go
@@ -88,15 +88,17 @@ func (v *ChunkCacheVolume) Shutdown() {
 	}
 }
 
-func (v *ChunkCacheVolume) destroy() {
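+// doReset truncates the .dat and .idx cache files in place and removes the .ldb directory.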
+func (v *ChunkCacheVolume) doReset() {
 	v.Shutdown()
-	os.Remove(v.fileName + ".dat")
-	os.Remove(v.fileName + ".idx")
+	os.Truncate(v.fileName+".dat", 0)
+	os.Truncate(v.fileName+".idx", 0)
+	glog.V(4).Infof("cache removeAll %s ...", v.fileName+".ldb")
 	os.RemoveAll(v.fileName + ".ldb")
+	glog.V(4).Infof("cache removed %s", v.fileName+".ldb")
 }
 
 func (v *ChunkCacheVolume) Reset() (*ChunkCacheVolume, error) {
-	v.destroy()
+	v.doReset()
 	return LoadOrCreateChunkCacheVolume(v.fileName, v.sizeLimit)
 }
 
@@ -119,6 +121,29 @@ func (v *ChunkCacheVolume) GetNeedle(key types.NeedleId) ([]byte, error) {
 	return data, nil
 }
 
+func (v *ChunkCacheVolume) getNeedleSlice(key types.NeedleId, offset, length uint64) ([]byte, error) {
+	nv, ok := v.nm.Get(key)
+	if !ok {
+		return nil, storage.ErrorNotFound
+	}
+	wanted := min(int(length), int(nv.Size)-int(offset))
+	if wanted < 0 {
+		// should never happen, but better than panicking
+		return nil, ErrorOutOfBounds
+	}
+	data := make([]byte, wanted)
+	if readSize, readErr := v.DataBackend.ReadAt(data, nv.Offset.ToActualOffset()+int64(offset)); readErr != nil {
+		return nil, fmt.Errorf("read %s.dat [%d,%d): %v",
+			v.fileName, nv.Offset.ToActualOffset()+int64(offset), int(nv.Offset.ToActualOffset())+int(offset)+wanted, readErr)
+	} else {
+		if readSize != wanted {
+			return nil, fmt.Errorf("read %d, expected %d", readSize, wanted)
+		}
+	}
+
+	return data, nil
+}
+
 func (v *ChunkCacheVolume) WriteNeedle(key types.NeedleId, data []byte) error {
 
 	offset := v.fileSize
diff --git a/weed/util/chunk_cache/on_disk_cache_layer.go b/weed/util/chunk_cache/on_disk_cache_layer.go
index eebd89798..a4b3b6994 100644
--- a/weed/util/chunk_cache/on_disk_cache_layer.go
+++ b/weed/util/chunk_cache/on_disk_cache_layer.go
@@ -82,6 +82,28 @@ func (c *OnDiskCacheLayer) getChunk(needleId types.NeedleId) (data []byte) {
 
 }
 
+func (c *OnDiskCacheLayer) getChunkSlice(needleId types.NeedleId, offset, length uint64) (data []byte) {
+
+	var err error
+
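+	// probe each cache volume in order; storage.ErrorNotFound just means the needle is not in that volume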
+	for _, diskCache := range c.diskCaches {
+		data, err = diskCache.getNeedleSlice(needleId, offset, length)
+		if err == storage.ErrorNotFound {
+			continue
+		}
+		if err != nil {
+			glog.Errorf("failed to read cache file %s id %d", diskCache.fileName, needleId)
+			continue
+		}
+		if len(data) != 0 {
+			return
+		}
+	}
+
+	return nil
+
+}
+
 func (c *OnDiskCacheLayer) shutdown() {
 
 	for _, diskCache := range c.diskCaches {
diff --git a/weed/util/compression.go b/weed/util/compression.go
index 9d52810cb..8699a8117 100644
--- a/weed/util/compression.go
+++ b/weed/util/compression.go
@@ -126,7 +126,7 @@ func IsZstdContent(data []byte) bool {
 
 	// by file name extension
 	switch ext {
-	case ".zip", ".rar", ".gz", ".bz2", ".xz", ".zst":
+	case ".zip", ".rar", ".gz", ".bz2", ".xz", ".zst", ".br":
 		return false, true
 	case ".pdf", ".txt", ".html", ".htm", ".css", ".js", ".json":
 		return true, true
@@ -147,6 +147,9 @@ func IsZstdContent(data []byte) bool {
 		if strings.HasSuffix(mtype, "script") {
 			return true, true
 		}
+		if strings.HasSuffix(mtype, "vnd.rar") {
+			return false, true
+		}
 	}
 
 	if strings.HasPrefix(mtype, "audio/") {
diff --git a/weed/util/constants.go b/weed/util/constants.go
index 40f4deae2..ef2159c2c 100644
--- a/weed/util/constants.go
+++ b/weed/util/constants.go
@@ -5,7 +5,7 @@ import (
 )
 
 var (
-	VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 38)
+	VERSION = fmt.Sprintf("%s %d.%02d", sizeLimit, 2, 43)
 	COMMIT  = ""
 )
 
diff --git a/weed/util/http_util.go b/weed/util/http_util.go
index 135d10c45..1630760b1 100644
--- a/weed/util/http_util.go
+++ b/weed/util/http_util.go
@@ -124,6 +124,27 @@ func Delete(url string, jwt string) error {
 	return errors.New(string(body))
 }
 
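+// DeleteProxied issues a DELETE request and returns the response body and HTTP status code for the caller to handle.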
+func DeleteProxied(url string, jwt string) (body []byte, httpStatus int, err error) {
+	req, err := http.NewRequest("DELETE", url, nil)
+	if err != nil {
+		return
+	}
+	if jwt != "" {
+		req.Header.Set("Authorization", "BEARER "+string(jwt))
+	}
+	resp, err := client.Do(req)
+	if err != nil {
+		return
+	}
+	defer resp.Body.Close()
+	body, err = ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return
+	}
+	httpStatus = resp.StatusCode
+	return
+}
+
 func GetBufferStream(url string, values url.Values, allocatedBytes []byte, eachBuffer func([]byte)) error {
 	r, err := client.PostForm(url, values)
 	if err != nil {
diff --git a/weed/util/minfreespace.go b/weed/util/minfreespace.go
new file mode 100644
index 000000000..c802bf6dd
--- /dev/null
+++ b/weed/util/minfreespace.go
@@ -0,0 +1,90 @@
+package util
+
+import (
+	"errors"
+	"fmt"
+	"github.com/chrislusf/seaweedfs/weed/glog"
+	"strconv"
+	"strings"
+)
+
+// MinFreeSpaceType is the type of MinFreeSpace.
+type MinFreeSpaceType int
+
+const (
+	// AsPercent sets the MinFreeSpaceType to a percentage value from 0 to 100.
+	AsPercent MinFreeSpaceType = iota
+	// AsBytes sets the MinFreeSpaceType to an absolute value in bytes.
+	AsBytes
+)
+
+// MinFreeSpace is a type that defines the limit for the minimum free space.
+type MinFreeSpace struct {
+	Type    MinFreeSpaceType
+	Bytes   uint64
+	Percent float32
+	Raw     string
+}
+
+// IsLow tells whether the free space is low or not.
+func (s MinFreeSpace) IsLow(freeBytes uint64, freePercent float32) (yes bool, desc string) {
+	switch s.Type {
+	case AsPercent:
+		yes = freePercent < s.Percent
+		op := IfElse(yes, "<", ">=")
+		return yes, fmt.Sprintf("disk free %.2f%% %s required %.2f%%", freePercent, op, s.Percent)
+	case AsBytes:
+		yes = freeBytes < s.Bytes
+		op := IfElse(yes, "<", ">=")
+		return yes, fmt.Sprintf("disk free %s %s required %s",
+			BytesToHumanReadable(freeBytes), op, BytesToHumanReadable(s.Bytes))
+	}
+
+	return false, ""
+}
+
+// String returns a string representation of MinFreeSpace.
+func (s MinFreeSpace) String() string {
+	switch s.Type {
+	case AsPercent:
+		return fmt.Sprintf("%.2f%%", s.Percent)
+	default:
+		return s.Raw
+	}
+}
+
+// MustParseMinFreeSpace parses the comma-separated argument for the min free space setting.
+// minFreeSpace takes precedence over minFreeSpacePercent if it is set.
+func MustParseMinFreeSpace(minFreeSpace string, minFreeSpacePercent string) (spaces []MinFreeSpace) {
+	ss := strings.Split(EmptyTo(minFreeSpace, minFreeSpacePercent), ",")
+	for _, freeString := range ss {
+		if vv, e := ParseMinFreeSpace(freeString); e == nil {
+			spaces = append(spaces, *vv)
+		} else {
+			glog.Fatalf("The value specified in -minFreeSpace is not a valid value: %s", freeString)
+		}
+	}
+
+	return spaces
+}
+
+var ErrMinFreeSpaceBadValue = errors.New("minFreeSpace is invalid")
+
+// ParseMinFreeSpace parses the min free space expression s, either as a percentage (e.g. 1 or 10) or as a human-readable size (e.g. 10GiB).
+func ParseMinFreeSpace(s string) (*MinFreeSpace, error) {
+	if percent, e := strconv.ParseFloat(s, 32); e == nil {
+		if percent < 0 || percent > 100 {
+			return nil, ErrMinFreeSpaceBadValue
+		}
+		return &MinFreeSpace{Type: AsPercent, Percent: float32(percent), Raw: s}, nil
+	}
+
+	if directSize, e := ParseBytes(s); e == nil {
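+		// values of 100 bytes or less are rejected, presumably to catch inputs that were meant as percentages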
+		if directSize <= 100 {
+			return nil, ErrMinFreeSpaceBadValue
+		}
+		return &MinFreeSpace{Type: AsBytes, Bytes: directSize, Raw: s}, nil
+	}
+
+	return nil, ErrMinFreeSpaceBadValue
+}
diff --git a/weed/util/minfreespace_test.go b/weed/util/minfreespace_test.go
new file mode 100644
index 000000000..eec1942dd
--- /dev/null
+++ b/weed/util/minfreespace_test.go
@@ -0,0 +1,29 @@
+package util
+
+import "testing"
+
+func TestParseMinFreeSpace(t *testing.T) {
+	tests := []struct {
+		in    string
+		ok    bool
+		value *MinFreeSpace
+	}{
+		{in: "42", ok: true, value: &MinFreeSpace{Type: AsPercent, Percent: 42, Raw: "42"}},
+		{in: "-1", ok: false, value: nil},
+		{in: "101", ok: false, value: nil},
+		{in: "100B", ok: false, value: nil},
+		{in: "100Ki", ok: true, value: &MinFreeSpace{Type: AsBytes, Bytes: 100 * 1024, Raw: "100Ki"}},
+		{in: "100GiB", ok: true, value: &MinFreeSpace{Type: AsBytes, Bytes: 100 * 1024 * 1024 * 1024, Raw: "100GiB"}},
+		{in: "42M", ok: true, value: &MinFreeSpace{Type: AsBytes, Bytes: 42 * 1000 * 1000, Raw: "42M"}},
+	}
+
+	for _, p := range tests {
+		got, err := ParseMinFreeSpace(p.in)
+		if p.ok != (err == nil) {
+			t.Errorf("failed to test %v", p.in)
+		}
+		if p.ok && err == nil && *got != *p.value {
+			t.Errorf("failed to test %v", p.in)
+		}
+	}
+}
diff --git a/weed/wdclient/exclusive_locks/exclusive_locker.go b/weed/wdclient/exclusive_locks/exclusive_locker.go
index d477a6b2d..5b5fa2704 100644
--- a/weed/wdclient/exclusive_locks/exclusive_locker.go
+++ b/weed/wdclient/exclusive_locks/exclusive_locker.go
@@ -41,7 +41,7 @@ func (l *ExclusiveLocker) GetToken() (token int64, lockTsNs int64) {
 	return atomic.LoadInt64(&l.token), atomic.LoadInt64(&l.lockTsNs)
 }
 
-func (l *ExclusiveLocker) RequestLock() {
+func (l *ExclusiveLocker) RequestLock(clientName string) {
 	if l.isLocking {
 		return
 	}
@@ -56,6 +56,7 @@ func (l *ExclusiveLocker) RequestLock() {
 				PreviousToken:    atomic.LoadInt64(&l.token),
 				PreviousLockTime: atomic.LoadInt64(&l.lockTsNs),
 				LockName:         AdminLockName,
+				ClientName:       clientName,
 			})
 			if err == nil {
 				atomic.StoreInt64(&l.token, resp.Token)
@@ -63,7 +64,7 @@ func (l *ExclusiveLocker) RequestLock() {
 			}
 			return err
 		}); err != nil {
-			// println("leasing problem", err.Error())
+			println("lock:", err.Error())
 			time.Sleep(InitLockInteval)
 		} else {
 			break
@@ -83,6 +84,7 @@ func (l *ExclusiveLocker) RequestLock() {
 					PreviousToken:    atomic.LoadInt64(&l.token),
 					PreviousLockTime: atomic.LoadInt64(&l.lockTsNs),
 					LockName:         AdminLockName,
+					ClientName:       clientName,
 				})
 				if err == nil {
 					atomic.StoreInt64(&l.token, resp.Token)