// weed-fs/src/cmd/weed/volume.go — the "weed volume" subcommand: a volume server.
package main

import (
	"bytes"
	"log"
	"math/rand"
	"mime"
	"net/http"
	"os"
	"runtime"
	"strconv"
	"strings"
	"time"

	"pkg/operation"
	"pkg/storage"
)
func init() {
2012-08-24 13:46:54 +08:00
cmdVolume.Run = runVolume // break init cycle
IsDebug = cmdVolume.Flag.Bool("debug", false, "enable debug mode")
}
var cmdVolume = &Command{
2012-09-26 16:55:56 +08:00
UsageLine: "volume -port=8080 -dir=/tmp -max=5 -ip=server_name -mserver=localhost:9333",
2012-08-24 13:46:54 +08:00
Short: "start a volume server",
Long: `start a volume server to provide storage spaces
`,
}
var (
vport = cmdVolume.Flag.Int("port", 8080, "http listen port")
2012-09-24 05:51:25 +08:00
volumeFolder = cmdVolume.Flag.String("dir", "/tmp", "directory to store data files")
2012-09-26 17:29:16 +08:00
ip = cmdVolume.Flag.String("ip", "localhost", "ip or server name")
2012-09-26 16:55:56 +08:00
publicUrl = cmdVolume.Flag.String("publicUrl", "", "Publicly accessible <ip|server_name>:<port>")
2012-09-24 05:51:25 +08:00
masterNode = cmdVolume.Flag.String("mserver", "localhost:9333", "master server location")
2012-09-29 01:21:06 +08:00
vpulse = cmdVolume.Flag.Int("pulseSeconds", 5, "number of seconds between heartbeats, must be smaller than the master's setting")
2012-09-20 07:56:35 +08:00
maxVolumeCount = cmdVolume.Flag.Int("max", 5, "maximum number of volumes")
vReadTimeout = cmdVolume.Flag.Int("readTimeout", 3, "connection read timeout in seconds")
vMaxCpu = cmdVolume.Flag.Int("maxCpu", 0, "maximum number of CPUs. 0 means all available CPUs")
2012-08-24 13:46:54 +08:00
store *storage.Store
)
func statusHandler(w http.ResponseWriter, r *http.Request) {
m := make(map[string]interface{})
m["Version"] = VERSION
m["Volumes"] = store.Status()
writeJson(w, r, m)
}
2012-09-11 08:08:52 +08:00
func assignVolumeHandler(w http.ResponseWriter, r *http.Request) {
err := store.AddVolume(r.FormValue("volume"), r.FormValue("replicationType"))
if err == nil {
writeJson(w, r, map[string]string{"error": ""})
} else {
writeJson(w, r, map[string]string{"error": err.Error()})
}
2012-11-07 17:51:43 +08:00
debug("assign volume =", r.FormValue("volume"), ", replicationType =", r.FormValue("replicationType"), ", error =", err)
}
2012-11-24 09:03:27 +08:00
func vacuumVolumeCheckHandler(w http.ResponseWriter, r *http.Request) {
err, ret := store.CheckCompactVolume(r.FormValue("volume"), r.FormValue("garbageThreshold"))
if err == nil {
writeJson(w, r, map[string]interface{}{"error": "", "result": ret})
} else {
writeJson(w, r, map[string]interface{}{"error": err.Error(), "result": false})
}
debug("checked compacting volume =", r.FormValue("volume"), "garbageThreshold =", r.FormValue("garbageThreshold"), "vacuum =", ret)
}
2012-11-07 17:51:43 +08:00
// vacuumVolumeCompactHandler compacts one volume into a new data file,
// reclaiming space held by deleted needles, and reports the outcome as JSON.
func vacuumVolumeCompactHandler(w http.ResponseWriter, r *http.Request) {
	volume := r.FormValue("volume")
	err := store.CompactVolume(volume)
	errMsg := ""
	if err != nil {
		errMsg = err.Error()
	}
	writeJson(w, r, map[string]string{"error": errMsg})
	debug("compacted volume =", volume, ", error =", err)
}
func vacuumVolumeCommitHandler(w http.ResponseWriter, r *http.Request) {
2012-11-24 09:03:27 +08:00
err := store.CommitCompactVolume(r.FormValue("volume"))
if err == nil {
2012-11-24 09:03:27 +08:00
writeJson(w, r, map[string]interface{}{"error": ""})
} else {
writeJson(w, r, map[string]string{"error": err.Error()})
}
debug("commit compact volume =", r.FormValue("volume"), ", error =", err)
2012-09-11 08:08:52 +08:00
}
func storeHandler(w http.ResponseWriter, r *http.Request) {
2012-08-24 13:46:54 +08:00
switch r.Method {
case "GET":
GetHandler(w, r)
case "DELETE":
DeleteHandler(w, r)
case "POST":
PostHandler(w, r)
}
}
func GetHandler(w http.ResponseWriter, r *http.Request) {
2012-08-24 13:46:54 +08:00
n := new(storage.Needle)
vid, fid, ext := parseURLPath(r.URL.Path)
2012-09-26 07:05:31 +08:00
volumeId, err := storage.NewVolumeId(vid)
if err != nil {
2012-09-28 03:17:27 +08:00
debug("parsing error:", err, r.URL.Path)
2012-09-26 07:05:31 +08:00
return
}
2012-08-24 13:46:54 +08:00
n.ParsePath(fid)
2012-09-28 03:17:27 +08:00
debug("volume", volumeId, "reading", n)
if !store.HasVolume(volumeId) {
2012-09-26 07:05:31 +08:00
lookupResult, err := operation.Lookup(*masterNode, volumeId)
2012-09-28 03:17:27 +08:00
debug("volume", volumeId, "found on", lookupResult, "error", err)
if err == nil {
http.Redirect(w, r, "http://"+lookupResult.Locations[0].PublicUrl+r.URL.Path, http.StatusMovedPermanently)
} else {
2012-09-28 03:17:27 +08:00
debug("lookup error:", err, r.URL.Path)
w.WriteHeader(http.StatusNotFound)
}
return
}
2012-08-24 13:46:54 +08:00
cookie := n.Cookie
count, e := store.Read(volumeId, n)
2012-09-28 03:17:27 +08:00
debug("read bytes", count, "error", e)
if e != nil || count <= 0 {
2012-09-28 03:17:27 +08:00
debug("read error:", e, r.URL.Path)
w.WriteHeader(http.StatusNotFound)
return
2012-08-24 13:46:54 +08:00
}
if n.Cookie != cookie {
log.Println("request with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
w.WriteHeader(http.StatusNotFound)
2012-08-24 13:46:54 +08:00
return
}
if ext != "" {
mtype := mime.TypeByExtension(ext)
w.Header().Set("Content-Type", mtype)
2012-10-24 01:59:40 +08:00
if storage.IsGzippable(ext, mtype) {
2012-08-24 13:46:54 +08:00
if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
w.Header().Set("Content-Encoding", "gzip")
} else {
n.Data = storage.UnGzipData(n.Data)
}
}
}
w.Write(n.Data)
}
func PostHandler(w http.ResponseWriter, r *http.Request) {
2012-09-21 16:30:31 +08:00
r.ParseForm()
2012-08-24 13:46:54 +08:00
vid, _, _ := parseURLPath(r.URL.Path)
2012-09-04 11:40:38 +08:00
volumeId, e := storage.NewVolumeId(vid)
2012-08-24 13:46:54 +08:00
if e != nil {
writeJson(w, r, e)
} else {
needle, filename, ne := storage.NewNeedle(r)
2012-08-24 13:46:54 +08:00
if ne != nil {
writeJson(w, r, ne)
} else {
ret := store.Write(volumeId, needle)
2012-09-27 05:28:46 +08:00
errorStatus := ""
needToReplicate := !store.HasVolume(volumeId)
if ret > 0 {
needToReplicate = needToReplicate || store.GetVolume(volumeId).NeedToReplicate()
}else{
errorStatus = "Failed to write to local disk"
}
if !needToReplicate && ret > 0 {
needToReplicate = store.GetVolume(volumeId).NeedToReplicate()
}
if needToReplicate { //send to other replica locations
2012-09-21 08:58:29 +08:00
if r.FormValue("type") != "standard" {
if !distributedOperation(volumeId, func(location operation.Location) bool {
2012-09-27 04:38:45 +08:00
_, err := operation.Upload("http://"+location.Url+r.URL.Path+"?type=standard", filename, bytes.NewReader(needle.Data))
return err == nil
}) {
ret = 0
2012-09-27 05:28:46 +08:00
errorStatus = "Failed to write to replicas for volume " + volumeId.String()
2012-09-27 04:38:45 +08:00
}
2012-09-21 08:58:29 +08:00
}
}
2012-09-27 05:28:46 +08:00
m := make(map[string]interface{})
if errorStatus == "" {
w.WriteHeader(http.StatusCreated)
} else {
2012-09-27 11:30:05 +08:00
store.Delete(volumeId, needle)
distributedOperation(volumeId, func(location operation.Location) bool {
return nil == operation.Delete("http://"+location.Url+r.URL.Path+"?type=standard")
})
w.WriteHeader(http.StatusInternalServerError)
m["error"] = errorStatus
}
2012-08-24 13:46:54 +08:00
m["size"] = ret
writeJson(w, r, m)
}
}
}
func DeleteHandler(w http.ResponseWriter, r *http.Request) {
2012-08-24 13:46:54 +08:00
n := new(storage.Needle)
vid, fid, _ := parseURLPath(r.URL.Path)
2012-09-04 11:40:38 +08:00
volumeId, _ := storage.NewVolumeId(vid)
2012-08-24 13:46:54 +08:00
n.ParsePath(fid)
2012-09-28 03:17:27 +08:00
debug("deleting", n)
2012-08-24 13:46:54 +08:00
cookie := n.Cookie
count, ok := store.Read(volumeId, n)
if ok != nil {
m := make(map[string]uint32)
m["size"] = 0
writeJson(w, r, m)
return
}
if n.Cookie != cookie {
log.Println("delete with unmaching cookie from ", r.RemoteAddr, "agent", r.UserAgent())
return
}
n.Size = 0
2012-09-26 18:27:10 +08:00
ret := store.Delete(volumeId, n)
needToReplicate := !store.HasVolume(volumeId)
if !needToReplicate && ret > 0 {
needToReplicate = store.GetVolume(volumeId).NeedToReplicate()
}
if needToReplicate { //send to other replica locations
2012-09-26 18:27:10 +08:00
if r.FormValue("type") != "standard" {
if !distributedOperation(volumeId, func(location operation.Location) bool {
2012-09-27 04:38:45 +08:00
return nil == operation.Delete("http://"+location.Url+r.URL.Path+"?type=standard")
}) {
ret = 0
}
2012-09-26 18:27:10 +08:00
}
}
if ret != 0 {
w.WriteHeader(http.StatusAccepted)
2012-09-27 04:38:45 +08:00
} else {
w.WriteHeader(http.StatusInternalServerError)
2012-09-26 18:27:10 +08:00
}
2012-08-24 13:46:54 +08:00
m := make(map[string]uint32)
m["size"] = uint32(count)
writeJson(w, r, m)
}
2012-09-28 03:17:27 +08:00
// parseURLPath splits a request path of the form ".../vid,fid[.ext]" into
// volume id, file id, and extension (including the leading dot). When the
// last path segment has no comma it returns empty strings, logging unknown
// ids (favicon.ico requests are ignored silently).
//
// Fix: commaIndex and dotIndex are found inside the sub-string
// path[sepIndex:] but were used as absolute indices into path — correct
// only when the last "/" is at position 0. Any nested path such as
// "/dir/5,abcd.png" sliced with wrong bounds (panic or garbage). The
// relative indices are now rebased onto the full string.
func parseURLPath(path string) (vid, fid, ext string) {
	sepIndex := strings.LastIndex(path, "/")
	commaIndex := strings.LastIndex(path[sepIndex:], ",")
	if commaIndex <= 0 {
		if "favicon.ico" != path[sepIndex+1:] {
			log.Println("unknown file id", path[sepIndex+1:])
		}
		return
	}
	commaIndex += sepIndex // rebase from path[sepIndex:] to path
	dotIndex := strings.LastIndex(path[sepIndex:], ".")
	if dotIndex > 0 {
		dotIndex += sepIndex // rebase as above
	}
	vid = path[sepIndex+1 : commaIndex]
	fid = path[commaIndex+1:]
	if dotIndex > 0 {
		fid = path[commaIndex+1 : dotIndex]
		ext = path[dotIndex:]
	}
	return
}
2012-09-28 03:17:27 +08:00
func distributedOperation(volumeId storage.VolumeId, op func(location operation.Location) bool) bool {
2012-09-26 18:27:10 +08:00
if lookupResult, lookupErr := operation.Lookup(*masterNode, volumeId); lookupErr == nil {
2012-09-27 04:38:45 +08:00
length := 0
selfUrl := (*ip + ":" + strconv.Itoa(*vport))
results := make(chan bool)
for _, location := range lookupResult.Locations {
if location.Url != selfUrl {
length++
2012-09-28 03:17:27 +08:00
go func(location operation.Location, results chan bool) {
results <- op(location)
}(location, results)
2012-09-26 18:27:10 +08:00
}
}
2012-09-27 04:38:45 +08:00
ret := true
for i := 0; i < length; i++ {
ret = ret && <-results
}
return ret
2012-09-26 18:27:10 +08:00
} else {
log.Println("Failed to lookup for", volumeId, lookupErr.Error())
}
2012-09-27 04:38:45 +08:00
return false
2012-09-26 18:27:10 +08:00
}
func runVolume(cmd *Command, args []string) bool {
2012-11-07 17:51:43 +08:00
if *vMaxCpu < 1 {
*vMaxCpu = runtime.NumCPU()
}
runtime.GOMAXPROCS(*vMaxCpu)
fileInfo, err := os.Stat(*volumeFolder)
if err != nil {
log.Fatalf("No Existing Folder:%s", *volumeFolder)
}
if !fileInfo.IsDir() {
log.Fatalf("Volume Folder should not be a file:%s", *volumeFolder)
}
perm := fileInfo.Mode().Perm()
log.Println("Volume Folder permission:", perm)
2012-09-26 18:27:10 +08:00
2012-09-26 17:29:16 +08:00
if *publicUrl == "" {
2012-09-26 18:27:10 +08:00
*publicUrl = *ip + ":" + strconv.Itoa(*vport)
2012-09-26 17:29:16 +08:00
}
2012-09-26 16:55:56 +08:00
store = storage.NewStore(*vport, *ip, *publicUrl, *volumeFolder, *maxVolumeCount)
2012-08-24 13:46:54 +08:00
defer store.Close()
http.HandleFunc("/", storeHandler)
http.HandleFunc("/status", statusHandler)
2012-09-11 08:08:52 +08:00
http.HandleFunc("/admin/assign_volume", assignVolumeHandler)
2012-11-24 09:03:27 +08:00
http.HandleFunc("/admin/vacuum_volume_check", vacuumVolumeCheckHandler)
2012-11-07 17:51:43 +08:00
http.HandleFunc("/admin/vacuum_volume_compact", vacuumVolumeCompactHandler)
http.HandleFunc("/admin/vacuum_volume_commit", vacuumVolumeCommitHandler)
2012-08-24 13:46:54 +08:00
go func() {
for {
2012-09-09 07:25:44 +08:00
store.Join(*masterNode)
2012-09-04 11:40:38 +08:00
time.Sleep(time.Duration(float32(*vpulse*1e3)*(1+rand.Float32())) * time.Millisecond)
2012-08-24 13:46:54 +08:00
}
}()
2012-09-09 07:25:44 +08:00
log.Println("store joined at", *masterNode)
2012-08-24 13:46:54 +08:00
2012-09-29 00:13:17 +08:00
log.Println("Start Weed volume server", VERSION, "at http://"+*ip+":"+strconv.Itoa(*vport))
srv := &http.Server{
2012-09-29 01:21:06 +08:00
Addr: ":" + strconv.Itoa(*vport),
Handler: http.DefaultServeMux,
ReadTimeout: (time.Duration(*vReadTimeout) * time.Second),
}
2012-09-29 00:13:17 +08:00
e := srv.ListenAndServe()
2012-08-24 13:46:54 +08:00
if e != nil {
2012-09-26 14:28:16 +08:00
log.Fatalf("Fail to start:%s", e.Error())
2012-08-24 13:46:54 +08:00
}
return true
}