// mirror of https://github.com/fatedier/frp.git
// synced 2024-12-18 20:57:53 +08:00
//go:build ignore
// +build ignore

// Copyright 2015, Klaus Post, see LICENSE for details.
//
// Simple stream encoder example
//
// The encoder encodes a single file into a number of shards
// To reverse the process see "stream-decoder.go"
//
// To build an executable use:
//
// go build stream-encoder.go
//
// Simple Encoder/Decoder Shortcomings:
// * If the file size of the input isn't divisible by the number of data shards
//   the output will contain extra zeroes
//
// * If the shard numbers aren't the same for the decoder as in the
//   encoder, invalid output will be generated.
//
// * If values have changed in a shard, it cannot be reconstructed.
//
// * If two shards have been swapped, reconstruction will always fail.
//   You need to supply the shards in the same order as they were given to you.
//
// The solution for this is to save a metadata file containing:
//
// * File size.
// * The number of data/parity shards.
// * HASH of each shard.
// * Order of the shards.
//
// If you save these properties, you should be able to detect file corruption
// in a shard and be able to reconstruct your data if you have the needed number of shards left.
package main

import (
	"flag"
	"fmt"
	"io"
	"os"
	"path/filepath"

	"github.com/klauspost/reedsolomon"
)

// Command-line flags controlling how the input file is sharded and
// where the resulting shard files are written.
var (
	dataShards = flag.Int("data", 4, "Number of shards to split the data into, must be below 257.")
	parShards  = flag.Int("par", 2, "Number of parity shards")
	outDir     = flag.String("out", "", "Alternative output directory")
)
func init() {
|
|
flag.Usage = func() {
|
|
fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0])
|
|
fmt.Fprintf(os.Stderr, " %s [-flags] filename.ext\n\n", os.Args[0])
|
|
fmt.Fprintf(os.Stderr, "Valid flags:\n")
|
|
flag.PrintDefaults()
|
|
}
|
|
}
|
|
|
|
func main() {
|
|
// Parse command line parameters.
|
|
flag.Parse()
|
|
args := flag.Args()
|
|
if len(args) != 1 {
|
|
fmt.Fprintf(os.Stderr, "Error: No input filename given\n")
|
|
flag.Usage()
|
|
os.Exit(1)
|
|
}
|
|
if *dataShards > 257 {
|
|
fmt.Fprintf(os.Stderr, "Error: Too many data shards\n")
|
|
os.Exit(1)
|
|
}
|
|
fname := args[0]
|
|
|
|
// Create encoding matrix.
|
|
enc, err := reedsolomon.NewStream(*dataShards, *parShards)
|
|
checkErr(err)
|
|
|
|
fmt.Println("Opening", fname)
|
|
f, err := os.Open(fname)
|
|
checkErr(err)
|
|
|
|
instat, err := f.Stat()
|
|
checkErr(err)
|
|
|
|
shards := *dataShards + *parShards
|
|
out := make([]*os.File, shards)
|
|
|
|
// Create the resulting files.
|
|
dir, file := filepath.Split(fname)
|
|
if *outDir != "" {
|
|
dir = *outDir
|
|
}
|
|
for i := range out {
|
|
outfn := fmt.Sprintf("%s.%d", file, i)
|
|
fmt.Println("Creating", outfn)
|
|
out[i], err = os.Create(filepath.Join(dir, outfn))
|
|
checkErr(err)
|
|
}
|
|
|
|
// Split into files.
|
|
data := make([]io.Writer, *dataShards)
|
|
for i := range data {
|
|
data[i] = out[i]
|
|
}
|
|
// Do the split
|
|
err = enc.Split(f, data, instat.Size())
|
|
checkErr(err)
|
|
|
|
// Close and re-open the files.
|
|
input := make([]io.Reader, *dataShards)
|
|
|
|
for i := range data {
|
|
out[i].Close()
|
|
f, err := os.Open(out[i].Name())
|
|
checkErr(err)
|
|
input[i] = f
|
|
defer f.Close()
|
|
}
|
|
|
|
// Create parity output writers
|
|
parity := make([]io.Writer, *parShards)
|
|
for i := range parity {
|
|
parity[i] = out[*dataShards+i]
|
|
defer out[*dataShards+i].Close()
|
|
}
|
|
|
|
// Encode parity
|
|
err = enc.Encode(input, parity)
|
|
checkErr(err)
|
|
fmt.Printf("File split into %d data + %d parity shards.\n", *dataShards, *parShards)
|
|
|
|
}
|
|
|
|
// checkErr prints err to stderr and exits with status 2 when err is
// non-nil; it is a no-op for a nil error.
func checkErr(err error) {
	if err != nil {
		// Fix: the original format string lacked a trailing newline,
		// leaving the next shell prompt glued to the error text.
		fmt.Fprintf(os.Stderr, "Error: %s\n", err.Error())
		os.Exit(2)
	}
}