Mirror of https://github.com/fatedier/frp.git (synced 2024-11-23 18:49:24 +08:00)

refactoring monitor code, support prometheus (#1668)

* refactoring monitor code, support prometheus
* remove vendor

parent 6d1af85e80
commit 495d999b6c
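The commit replaces the old payload-based stats collector (stats.Collector with a Mark() call and typed payload structs) with a plain ServerMetrics interface that call sites invoke directly. A condensed before/after, taken verbatim from the server/control.go hunks later in this diff:

    // Before (removed in this commit):
    ctl.statsCollector.Mark(stats.TypeNewProxy, &stats.NewProxyPayload{
        Name:      m.ProxyName,
        ProxyType: m.ProxyType,
    })

    // After:
    metrics.Server.NewProxy(m.ProxyName, m.ProxyType)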
.gitignore (vendored, 1 change)
@@ -27,6 +27,7 @@ _testmain.go
bin/
packages/
test/bin/
vendor/

# Cache
*.swp
Makefile (1 change)
@@ -1,4 +1,5 @@
export PATH := $(GOPATH)/bin:$(PATH)
export GO111MODULE=on

all: fmt build
@@ -21,6 +21,7 @@ import (
    "github.com/fatedier/golib/crypto"

    _ "github.com/fatedier/frp/assets/frps/statik"
    _ "github.com/fatedier/frp/models/metrics"
)

func main() {
@@ -33,6 +33,9 @@ dashboard_port = 7500
dashboard_user = admin
dashboard_pwd = admin

# enable_prometheus will export prometheus metrics on {dashboard_addr}:{dashboard_port} in /metrics api.
enable_prometheus = true

# dashboard assets directory(only for debug mode)
# assets_dir = ./static
# console or real logFile path like ./frps.log
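A minimal frps.ini that turns the endpoint on could look like the following (ports and credentials are the stock sample values from frps_full.ini; adjust to taste). With the dashboard configured, the exporter is served on the same listener at /metrics, behind the dashboard's basic-auth middleware.

    [common]
    bind_port = 7000
    dashboard_addr = 0.0.0.0
    dashboard_port = 7500
    dashboard_user = admin
    dashboard_pwd = admin
    enable_prometheus = true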
go.mod (4 changes)
@@ -17,13 +17,13 @@ require (
    github.com/klauspost/reedsolomon v1.9.1 // indirect
    github.com/mattn/go-runewidth v0.0.4 // indirect
    github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc
    github.com/pkg/errors v0.8.0 // indirect
    github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
    github.com/prometheus/client_golang v1.4.1
    github.com/rakyll/statik v0.1.1
    github.com/rodaine/table v1.0.0
    github.com/spf13/cobra v0.0.3
    github.com/spf13/pflag v1.0.1 // indirect
    github.com/stretchr/testify v1.3.0
    github.com/stretchr/testify v1.4.0
    github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047 // indirect
    github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554 // indirect
    github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8 // indirect
go.sum (102 changes)
@@ -1,81 +1,141 @@
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk=
|
||||
github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/fatedier/beego v0.0.0-20171024143340-6c6a4f5bd5eb h1:wCrNShQidLmvVWn/0PikGmpdP0vtQmnvyRg3ZBEhczw=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/fatedier/beego v0.0.0-20171024143340-6c6a4f5bd5eb/go.mod h1:wx3gB6dbIfBRcucp94PI9Bt3I0F2c/MyNEWuhzpWiwk=
|
||||
github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049 h1:teH578mf2ii42NHhIp3PhgvjU5bv+NFMq9fSQR8NaG8=
|
||||
github.com/fatedier/golib v0.0.0-20181107124048-ff8cd814b049/go.mod h1:DqIrnl0rp3Zybg9zbJmozTy1n8fYJoX+QoAj9slIkKM=
|
||||
github.com/fatedier/kcp-go v2.0.4-0.20190803094908-fe8645b0a904+incompatible h1:ssXat9YXFvigNge/IkkZvFMn8yeYKFX+uI6wn2mLJ74=
|
||||
github.com/fatedier/kcp-go v2.0.4-0.20190803094908-fe8645b0a904+incompatible/go.mod h1:YpCOaxj7vvMThhIQ9AfTOPW2sfztQR5WDfs7AflSy4s=
|
||||
github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049 h1:K9KHZbXKpGydfDN0aZrsoHpLJlZsBrGMFWbgLDGnPZk=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d h1:kJCB4vdITiW1eC1vq2e6IsrXKrZit1bv/TDYFGMp4BQ=
|
||||
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE=
|
||||
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/reedsolomon v1.9.1 h1:kYrT1MlR4JH6PqOpC+okdb9CDTcwEC/BqpzK4WFyXL8=
|
||||
github.com/klauspost/reedsolomon v1.9.1/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4=
|
||||
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc h1:lNOt1SMsgHXTdpuGw+RpnJtzUcCb/oRKZP65pBy9pr8=
|
||||
github.com/pires/go-proxyproto v0.0.0-20190111085350-4d51b51e3bfc/go.mod h1:6/gX3+E/IYGa0wMORlSMla999awQFdbaeQCHjSMKIzY=
|
||||
github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/rakyll/statik v0.1.1 h1:fCLHsIMajHqD5RKigbFXpvX3dN7c80Pm12+NCrI3kvg=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.4.1 h1:FFSuS004yOQEtDdTq+TAOLP5xUq63KqAFYyOi8zA+Y8=
|
||||
github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
|
||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/rakyll/statik v0.1.1/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs=
|
||||
github.com/rodaine/table v1.0.0 h1:UaCJG5Axc/cNXVGXqnCrffm1KxP0OfYLe1HuJLf5sFY=
|
||||
github.com/rodaine/table v1.0.0/go.mod h1:YAUzwPOji0DUJNEvggdxyQcUAl4g3hDRcFlyjnnR51I=
|
||||
github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047 h1:K+jtWCOuZgCra7eXZ/VWn2FbJmrA/D058mTXhh2rq+8=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/templexxx/cpufeat v0.0.0-20170927014610-3794dfbfb047/go.mod h1:wM7WEvslTq+iOEAMDLSzhVuOt5BRZ05WirO+b09GHQU=
|
||||
github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554 h1:pexgSe+JCFuxG+uoMZLO+ce8KHtdHGhst4cs6rw3gmk=
|
||||
github.com/templexxx/xor v0.0.0-20170926022130-0af8e873c554/go.mod h1:5XA7W9S6mni3h5uvOC75dA3m9CCCaS83lltmc0ukdi4=
|
||||
github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8 h1:6CNSDqI1wiE+JqyOy5Qt/yo/DoNI2/QmmOZeiCid2Nw=
|
||||
github.com/tjfoc/gmsm v0.0.0-20171124023159-98aa888b79d8/go.mod h1:XxO4hdhhrzAd+G4CjDqaOkd0hUzmtPR/d3EiBBMn/wc=
|
||||
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec h1:DGmKwyZwEB8dI7tbLt/I/gQuP559o/0FrAkHKlQM/Ks=
|
||||
github.com/vaughan0/go-ini v0.0.0-20130923145212-a98ad7ee00ec/go.mod h1:owBmyHYMLkxyrugmfwE/DLJyW8Ro9mkphwuVErQ0iUw=
|
||||
github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae h1:J0GxkO96kL4WF+AIT3M4mfUVinOCPgf2uUWYFUzN0sM=
|
||||
github.com/xtaci/lossyconn v0.0.0-20190602105132-8df528c0c9ae/go.mod h1:gXtu8J62kEgmN++bm9BVICuT/e8yiLI2KFobd/TRFsE=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/square/go-jose.v2 v2.4.1 h1:H0TmLt7/KmzlrDOpa1F+zr0Tk90PbJYBfsVUmRLrf9Y=
|
||||
gopkg.in/square/go-jose.v2 v2.4.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
@@ -48,31 +48,25 @@ type ServerCommonConf struct {
    // ProxyBindAddr specifies the address that the proxy binds to. This value
    // may be the same as BindAddr. By default, this value is "0.0.0.0".
    ProxyBindAddr string `json:"proxy_bind_addr"`

    // VhostHttpPort specifies the port that the server listens for HTTP Vhost
    // requests. If this value is 0, the server will not listen for HTTP
    // requests. By default, this value is 0.
    VhostHttpPort int `json:"vhost_http_port"`

    // VhostHttpsPort specifies the port that the server listens for HTTPS
    // Vhost requests. If this value is 0, the server will not listen for HTTPS
    // requests. By default, this value is 0.
    VhostHttpsPort int `json:"vhost_https_port"`

    // TcpMuxHttpConnectPort specifies the port that the server listens for TCP
    // HTTP CONNECT requests. If the value is 0, the server will not multiplex TCP
    // requests on one single port. If it's not - it will listen on this value for
    // HTTP CONNECT requests. By default, this value is 0.
    TcpMuxHttpConnectPort int `json:"tcpmux_httpconnect_port"`

    // VhostHttpTimeout specifies the response header timeout for the Vhost
    // HTTP server, in seconds. By default, this value is 60.
    VhostHttpTimeout int64 `json:"vhost_http_timeout"`

    // DashboardAddr specifies the address that the dashboard binds to. By
    // default, this value is "0.0.0.0".
    DashboardAddr string `json:"dashboard_addr"`

    // DashboardPort specifies the port that the dashboard listens on. If this
    // value is 0, the dashboard will not be started. By default, this value is
    // 0.
@@ -83,6 +77,9 @@ type ServerCommonConf struct {
    // DashboardPwd specifies the password that the dashboard will use for
    // login. By default, this value is "admin".
    DashboardPwd string `json:"dashboard_pwd"`
    // EnablePrometheus will export prometheus metrics on {dashboard_addr}:{dashboard_port}
    // in /metrics api.
    EnablePrometheus bool `json:"enable_prometheus"`
    // AssetsDir specifies the local directory that the dashboard will load
    // resources from. If this value is "", assets will be loaded from the
    // bundled executable using statik. By default, this value is "".
@@ -167,6 +164,7 @@ func GetDefaultServerConf() ServerCommonConf {
        DashboardPort:    0,
        DashboardUser:    "admin",
        DashboardPwd:     "admin",
        EnablePrometheus: false,
        AssetsDir:        "",
        LogFile:          "console",
        LogWay:           "console",
@@ -312,6 +310,10 @@ func UnmarshalServerConfFromIni(content string) (cfg ServerCommonConf, err error
        cfg.DashboardPwd = tmpStr
    }

    if tmpStr, ok = conf.Get("common", "enable_prometheus"); ok && tmpStr == "true" {
        cfg.EnablePrometheus = true
    }

    if tmpStr, ok = conf.Get("common", "assets_dir"); ok {
        cfg.AssetsDir = tmpStr
    }
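A rough sketch of how the new flag flows from an ini file into the struct above (the inline config string is made up for illustration, error handling trimmed):

    cfg, err := config.UnmarshalServerConfFromIni(`
    [common]
    bind_port = 7000
    dashboard_port = 7500
    enable_prometheus = true
    `)
    if err == nil && cfg.EnablePrometheus {
        // frps will attach promhttp's /metrics handler to the dashboard listener
    }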
models/metrics/aggregate/server.go (new file, 93 lines)
@@ -0,0 +1,93 @@
// Copyright 2020 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package aggregate

import (
    "github.com/fatedier/frp/models/metrics/mem"
    "github.com/fatedier/frp/models/metrics/prometheus"
    "github.com/fatedier/frp/server/metrics"
)

// EnableMem starts recording metrics in the in-memory monitor system.
func EnableMem() {
    sm.Add(mem.ServerMetrics)
}

// EnablePrometheus starts exporting metrics to prometheus.
func EnablePrometheus() {
    sm.Add(prometheus.ServerMetrics)
}

var sm *serverMetrics = &serverMetrics{}

func init() {
    metrics.Register(sm)
}

type serverMetrics struct {
    ms []metrics.ServerMetrics
}

func (m *serverMetrics) Add(sm metrics.ServerMetrics) {
    m.ms = append(m.ms, sm)
}

func (m *serverMetrics) NewClient() {
    for _, v := range m.ms {
        v.NewClient()
    }
}

func (m *serverMetrics) CloseClient() {
    for _, v := range m.ms {
        v.CloseClient()
    }
}

func (m *serverMetrics) NewProxy(name string, proxyType string) {
    for _, v := range m.ms {
        v.NewProxy(name, proxyType)
    }
}

func (m *serverMetrics) CloseProxy(name string, proxyType string) {
    for _, v := range m.ms {
        v.CloseProxy(name, proxyType)
    }
}

func (m *serverMetrics) OpenConnection(name string, proxyType string) {
    for _, v := range m.ms {
        v.OpenConnection(name, proxyType)
    }
}

func (m *serverMetrics) CloseConnection(name string, proxyType string) {
    for _, v := range m.ms {
        v.CloseConnection(name, proxyType)
    }
}

func (m *serverMetrics) AddTrafficIn(name string, proxyType string, trafficBytes int64) {
    for _, v := range m.ms {
        v.AddTrafficIn(name, proxyType, trafficBytes)
    }
}

func (m *serverMetrics) AddTrafficOut(name string, proxyType string, trafficBytes int64) {
    for _, v := range m.ms {
        v.AddTrafficOut(name, proxyType, trafficBytes)
    }
}
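The aggregator is what makes the backends interchangeable: once a backend is added, every call on the shared metrics.Server value reaches all of them. A minimal usage sketch (the proxy name and type are made-up example values):

    aggregate.EnableMem()
    aggregate.EnablePrometheus()
    metrics.Server.NewProxy("ssh", "tcp") // counted by both the in-memory and the Prometheus collector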
models/metrics/mem/server.go (new file, 262 lines)
@@ -0,0 +1,262 @@
|
||||
// Copyright 2019 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package mem
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
server "github.com/fatedier/frp/server/metrics"
|
||||
"github.com/fatedier/frp/utils/log"
|
||||
"github.com/fatedier/frp/utils/metric"
|
||||
)
|
||||
|
||||
var sm *serverMetrics = newServerMetrics()
|
||||
var ServerMetrics server.ServerMetrics
|
||||
var StatsCollector Collector
|
||||
|
||||
func init() {
|
||||
ServerMetrics = sm
|
||||
StatsCollector = sm
|
||||
sm.run()
|
||||
}
|
||||
|
||||
type serverMetrics struct {
|
||||
info *ServerStatistics
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func newServerMetrics() *serverMetrics {
|
||||
return &serverMetrics{
|
||||
info: &ServerStatistics{
|
||||
TotalTrafficIn: metric.NewDateCounter(ReserveDays),
|
||||
TotalTrafficOut: metric.NewDateCounter(ReserveDays),
|
||||
CurConns: metric.NewCounter(),
|
||||
|
||||
ClientCounts: metric.NewCounter(),
|
||||
ProxyTypeCounts: make(map[string]metric.Counter),
|
||||
|
||||
ProxyStatistics: make(map[string]*ProxyStatistics),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (m *serverMetrics) run() {
|
||||
go func() {
|
||||
for {
|
||||
time.Sleep(12 * time.Hour)
|
||||
log.Debug("start to clear useless proxy statistics data...")
|
||||
m.clearUselessInfo()
|
||||
log.Debug("finish to clear useless proxy statistics data")
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (m *serverMetrics) clearUselessInfo() {
|
||||
// To check if there are proxies that closed than 7 days and drop them.
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
for name, data := range m.info.ProxyStatistics {
|
||||
if !data.LastCloseTime.IsZero() && time.Since(data.LastCloseTime) > time.Duration(7*24)*time.Hour {
|
||||
delete(m.info.ProxyStatistics, name)
|
||||
log.Trace("clear proxy [%s]'s statistics data, lastCloseTime: [%s]", name, data.LastCloseTime.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *serverMetrics) NewClient() {
|
||||
m.info.ClientCounts.Inc(1)
|
||||
}
|
||||
|
||||
func (m *serverMetrics) CloseClient() {
|
||||
m.info.ClientCounts.Dec(1)
|
||||
}
|
||||
|
||||
func (m *serverMetrics) NewProxy(name string, proxyType string) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
counter, ok := m.info.ProxyTypeCounts[proxyType]
|
||||
if !ok {
|
||||
counter = metric.NewCounter()
|
||||
}
|
||||
counter.Inc(1)
|
||||
m.info.ProxyTypeCounts[proxyType] = counter
|
||||
|
||||
proxyStats, ok := m.info.ProxyStatistics[name]
|
||||
if !(ok && proxyStats.ProxyType == proxyType) {
|
||||
proxyStats = &ProxyStatistics{
|
||||
Name: name,
|
||||
ProxyType: proxyType,
|
||||
CurConns: metric.NewCounter(),
|
||||
TrafficIn: metric.NewDateCounter(ReserveDays),
|
||||
TrafficOut: metric.NewDateCounter(ReserveDays),
|
||||
}
|
||||
m.info.ProxyStatistics[name] = proxyStats
|
||||
}
|
||||
proxyStats.LastStartTime = time.Now()
|
||||
}
|
||||
|
||||
func (m *serverMetrics) CloseProxy(name string, proxyType string) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
if counter, ok := m.info.ProxyTypeCounts[proxyType]; ok {
|
||||
counter.Dec(1)
|
||||
}
|
||||
if proxyStats, ok := m.info.ProxyStatistics[name]; ok {
|
||||
proxyStats.LastCloseTime = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *serverMetrics) OpenConnection(name string, proxyType string) {
|
||||
m.info.CurConns.Inc(1)
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
proxyStats, ok := m.info.ProxyStatistics[name]
|
||||
if ok {
|
||||
proxyStats.CurConns.Inc(1)
|
||||
m.info.ProxyStatistics[name] = proxyStats
|
||||
}
|
||||
}
|
||||
|
||||
func (m *serverMetrics) CloseConnection(name string, proxyType string) {
|
||||
m.info.CurConns.Dec(1)
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
proxyStats, ok := m.info.ProxyStatistics[name]
|
||||
if ok {
|
||||
proxyStats.CurConns.Dec(1)
|
||||
m.info.ProxyStatistics[name] = proxyStats
|
||||
}
|
||||
}
|
||||
|
||||
func (m *serverMetrics) AddTrafficIn(name string, proxyType string, trafficBytes int64) {
|
||||
m.info.TotalTrafficIn.Inc(trafficBytes)
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
proxyStats, ok := m.info.ProxyStatistics[name]
|
||||
if ok {
|
||||
proxyStats.TrafficIn.Inc(trafficBytes)
|
||||
m.info.ProxyStatistics[name] = proxyStats
|
||||
}
|
||||
}
|
||||
|
||||
func (m *serverMetrics) AddTrafficOut(name string, proxyType string, trafficBytes int64) {
|
||||
m.info.TotalTrafficOut.Inc(trafficBytes)
|
||||
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
proxyStats, ok := m.info.ProxyStatistics[name]
|
||||
if ok {
|
||||
proxyStats.TrafficOut.Inc(trafficBytes)
|
||||
m.info.ProxyStatistics[name] = proxyStats
|
||||
}
|
||||
}
|
||||
|
||||
// Get stats data api.
|
||||
|
||||
func (m *serverMetrics) GetServer() *ServerStats {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
s := &ServerStats{
|
||||
TotalTrafficIn: m.info.TotalTrafficIn.TodayCount(),
|
||||
TotalTrafficOut: m.info.TotalTrafficOut.TodayCount(),
|
||||
CurConns: m.info.CurConns.Count(),
|
||||
ClientCounts: m.info.ClientCounts.Count(),
|
||||
ProxyTypeCounts: make(map[string]int64),
|
||||
}
|
||||
for k, v := range m.info.ProxyTypeCounts {
|
||||
s.ProxyTypeCounts[k] = v.Count()
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
func (m *serverMetrics) GetProxiesByType(proxyType string) []*ProxyStats {
|
||||
res := make([]*ProxyStats, 0)
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
for name, proxyStats := range m.info.ProxyStatistics {
|
||||
if proxyStats.ProxyType != proxyType {
|
||||
continue
|
||||
}
|
||||
|
||||
ps := &ProxyStats{
|
||||
Name: name,
|
||||
Type: proxyStats.ProxyType,
|
||||
TodayTrafficIn: proxyStats.TrafficIn.TodayCount(),
|
||||
TodayTrafficOut: proxyStats.TrafficOut.TodayCount(),
|
||||
CurConns: proxyStats.CurConns.Count(),
|
||||
}
|
||||
if !proxyStats.LastStartTime.IsZero() {
|
||||
ps.LastStartTime = proxyStats.LastStartTime.Format("01-02 15:04:05")
|
||||
}
|
||||
if !proxyStats.LastCloseTime.IsZero() {
|
||||
ps.LastCloseTime = proxyStats.LastCloseTime.Format("01-02 15:04:05")
|
||||
}
|
||||
res = append(res, ps)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func (m *serverMetrics) GetProxiesByTypeAndName(proxyType string, proxyName string) (res *ProxyStats) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
for name, proxyStats := range m.info.ProxyStatistics {
|
||||
if proxyStats.ProxyType != proxyType {
|
||||
continue
|
||||
}
|
||||
|
||||
if name != proxyName {
|
||||
continue
|
||||
}
|
||||
|
||||
res = &ProxyStats{
|
||||
Name: name,
|
||||
Type: proxyStats.ProxyType,
|
||||
TodayTrafficIn: proxyStats.TrafficIn.TodayCount(),
|
||||
TodayTrafficOut: proxyStats.TrafficOut.TodayCount(),
|
||||
CurConns: proxyStats.CurConns.Count(),
|
||||
}
|
||||
if !proxyStats.LastStartTime.IsZero() {
|
||||
res.LastStartTime = proxyStats.LastStartTime.Format("01-02 15:04:05")
|
||||
}
|
||||
if !proxyStats.LastCloseTime.IsZero() {
|
||||
res.LastCloseTime = proxyStats.LastCloseTime.Format("01-02 15:04:05")
|
||||
}
|
||||
break
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (m *serverMetrics) GetProxyTraffic(name string) (res *ProxyTrafficInfo) {
|
||||
m.mu.Lock()
|
||||
defer m.mu.Unlock()
|
||||
|
||||
proxyStats, ok := m.info.ProxyStatistics[name]
|
||||
if ok {
|
||||
res = &ProxyTrafficInfo{
|
||||
Name: name,
|
||||
}
|
||||
res.TrafficIn = proxyStats.TrafficIn.GetLastDaysCount(ReserveDays)
|
||||
res.TrafficOut = proxyStats.TrafficOut.GetLastDaysCount(ReserveDays)
|
||||
}
|
||||
return
|
||||
}
|
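The in-memory collector above is also the read path for the dashboard API: per-proxy DateCounters keep ReserveDays (7) days of traffic, and mem.StatsCollector exposes snapshot getters. A small read-side sketch (field names come from the types in this diff; the Printf formatting is only for illustration):

    s := mem.StatsCollector.GetServer()
    fmt.Printf("clients=%d conns=%d today_in=%d today_out=%d\n",
        s.ClientCounts, s.CurConns, s.TotalTrafficIn, s.TotalTrafficOut)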
@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package stats
|
||||
package mem
|
||||
|
||||
import (
|
||||
"time"
|
||||
@ -24,19 +24,6 @@ const (
|
||||
ReserveDays = 7
|
||||
)
|
||||
|
||||
type StatsType int
|
||||
|
||||
const (
|
||||
TypeNewClient StatsType = iota
|
||||
TypeCloseClient
|
||||
TypeNewProxy
|
||||
TypeCloseProxy
|
||||
TypeOpenConnection
|
||||
TypeCloseConnection
|
||||
TypeAddTrafficIn
|
||||
TypeAddTrafficOut
|
||||
)
|
||||
|
||||
type ServerStats struct {
|
||||
TotalTrafficIn int64
|
||||
TotalTrafficOut int64
|
||||
@ -88,42 +75,8 @@ type ServerStatistics struct {
|
||||
}
|
||||
|
||||
type Collector interface {
|
||||
Mark(statsType StatsType, payload interface{})
|
||||
Run() error
|
||||
GetServer() *ServerStats
|
||||
GetProxiesByType(proxyType string) []*ProxyStats
|
||||
GetProxiesByTypeAndName(proxyType string, proxyName string) *ProxyStats
|
||||
GetProxyTraffic(name string) *ProxyTrafficInfo
|
||||
}
|
||||
|
||||
type NewClientPayload struct{}
|
||||
|
||||
type CloseClientPayload struct{}
|
||||
|
||||
type NewProxyPayload struct {
|
||||
Name string
|
||||
ProxyType string
|
||||
}
|
||||
|
||||
type CloseProxyPayload struct {
|
||||
Name string
|
||||
ProxyType string
|
||||
}
|
||||
|
||||
type OpenConnectionPayload struct {
|
||||
ProxyName string
|
||||
}
|
||||
|
||||
type CloseConnectionPayload struct {
|
||||
ProxyName string
|
||||
}
|
||||
|
||||
type AddTrafficInPayload struct {
|
||||
ProxyName string
|
||||
TrafficBytes int64
|
||||
}
|
||||
|
||||
type AddTrafficOutPayload struct {
|
||||
ProxyName string
|
||||
TrafficBytes int64
|
||||
}
|
models/metrics/metrics.go (new file, 8 lines)
@@ -0,0 +1,8 @@
package metrics

import (
    "github.com/fatedier/frp/models/metrics/aggregate"
)

var EnableMem = aggregate.EnableMem
var EnablePrometheus = aggregate.EnablePrometheus
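This tiny facade is what the rest of the code imports. Condensed from other hunks in this commit, the wiring looks like:

    // cmd/frps: the blank import pulls in models/metrics and, through it, the
    // aggregate package, whose init() registers the fan-out collector.
    _ "github.com/fatedier/frp/models/metrics"

    // server/service.go: backends are enabled once the dashboard is configured.
    if statsEnable {
        modelmetrics.EnableMem()
        if cfg.EnablePrometheus {
            modelmetrics.EnablePrometheus()
        }
    }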
models/metrics/prometheus/server.go (new file, 95 lines)
@@ -0,0 +1,95 @@
package prometheus

import (
    "github.com/fatedier/frp/server/metrics"

    "github.com/prometheus/client_golang/prometheus"
)

const (
    namespace       = "frp"
    serverSubsystem = "server"
)

var ServerMetrics metrics.ServerMetrics = newServerMetrics()

type serverMetrics struct {
    clientCount     prometheus.Gauge
    proxyCount      *prometheus.GaugeVec
    connectionCount *prometheus.GaugeVec
    trafficIn       *prometheus.CounterVec
    trafficOut      *prometheus.CounterVec
}

func (m *serverMetrics) NewClient() {
    m.clientCount.Inc()
}

func (m *serverMetrics) CloseClient() {
    m.clientCount.Dec()
}

func (m *serverMetrics) NewProxy(name string, proxyType string) {
    m.proxyCount.WithLabelValues(proxyType).Inc()
}

func (m *serverMetrics) CloseProxy(name string, proxyType string) {
    m.proxyCount.WithLabelValues(proxyType).Dec()
}

func (m *serverMetrics) OpenConnection(name string, proxyType string) {
    m.connectionCount.WithLabelValues(name, proxyType).Inc()
}

func (m *serverMetrics) CloseConnection(name string, proxyType string) {
    m.connectionCount.WithLabelValues(name, proxyType).Dec()
}

func (m *serverMetrics) AddTrafficIn(name string, proxyType string, trafficBytes int64) {
    m.trafficIn.WithLabelValues(name, proxyType).Add(float64(trafficBytes))
}

func (m *serverMetrics) AddTrafficOut(name string, proxyType string, trafficBytes int64) {
    m.trafficOut.WithLabelValues(name, proxyType).Add(float64(trafficBytes))
}

func newServerMetrics() *serverMetrics {
    m := &serverMetrics{
        clientCount: prometheus.NewGauge(prometheus.GaugeOpts{
            Namespace: namespace,
            Subsystem: serverSubsystem,
            Name:      "client_counts",
            Help:      "The current client counts of frps",
        }),
        proxyCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
            Namespace: namespace,
            Subsystem: serverSubsystem,
            Name:      "proxy_counts",
            Help:      "The current proxy counts",
        }, []string{"type"}),
        connectionCount: prometheus.NewGaugeVec(prometheus.GaugeOpts{
            Namespace: namespace,
            Subsystem: serverSubsystem,
            Name:      "connection_counts",
            Help:      "The current connection counts",
        }, []string{"name", "type"}),
        trafficIn: prometheus.NewCounterVec(prometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: serverSubsystem,
            Name:      "traffic_in",
            Help:      "The total in traffic",
        }, []string{"name", "type"}),
        trafficOut: prometheus.NewCounterVec(prometheus.CounterOpts{
            Namespace: namespace,
            Subsystem: serverSubsystem,
            Name:      "traffic_out",
            Help:      "The total out traffic",
        }, []string{"name", "type"}),
    }
    prometheus.MustRegister(m.clientCount)
    prometheus.MustRegister(m.proxyCount)
    prometheus.MustRegister(m.connectionCount)
    prometheus.MustRegister(m.trafficIn)
    prometheus.MustRegister(m.trafficOut)
    return m
}
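Given the namespace and subsystem constants above, the series exposed at /metrics should look roughly like this (a hand-written sample with made-up values, not captured output):

    frp_server_client_counts 2
    frp_server_proxy_counts{type="tcp"} 3
    frp_server_connection_counts{name="ssh",type="tcp"} 1
    frp_server_traffic_in{name="ssh",type="tcp"} 4096
    frp_server_traffic_out{name="ssh",type="tcp"} 8192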
@ -30,8 +30,8 @@ import (
|
||||
"github.com/fatedier/frp/models/msg"
|
||||
plugin "github.com/fatedier/frp/models/plugin/server"
|
||||
"github.com/fatedier/frp/server/controller"
|
||||
"github.com/fatedier/frp/server/metrics"
|
||||
"github.com/fatedier/frp/server/proxy"
|
||||
"github.com/fatedier/frp/server/stats"
|
||||
"github.com/fatedier/frp/utils/util"
|
||||
"github.com/fatedier/frp/utils/version"
|
||||
"github.com/fatedier/frp/utils/xlog"
|
||||
@ -92,9 +92,6 @@ type Control struct {
|
||||
// plugin manager
|
||||
pluginManager *plugin.Manager
|
||||
|
||||
// stats collector to store stats info of clients and proxies
|
||||
statsCollector stats.Collector
|
||||
|
||||
// verifies authentication based on selected method
|
||||
authVerifier auth.Verifier
|
||||
|
||||
@ -152,7 +149,6 @@ func NewControl(
|
||||
rc *controller.ResourceController,
|
||||
pxyManager *proxy.ProxyManager,
|
||||
pluginManager *plugin.Manager,
|
||||
statsCollector stats.Collector,
|
||||
authVerifier auth.Verifier,
|
||||
ctlConn net.Conn,
|
||||
loginMsg *msg.Login,
|
||||
@ -167,7 +163,6 @@ func NewControl(
|
||||
rc: rc,
|
||||
pxyManager: pxyManager,
|
||||
pluginManager: pluginManager,
|
||||
statsCollector: statsCollector,
|
||||
authVerifier: authVerifier,
|
||||
conn: ctlConn,
|
||||
loginMsg: loginMsg,
|
||||
@ -381,16 +376,12 @@ func (ctl *Control) stoper() {
|
||||
for _, pxy := range ctl.proxies {
|
||||
pxy.Close()
|
||||
ctl.pxyManager.Del(pxy.GetName())
|
||||
ctl.statsCollector.Mark(stats.TypeCloseProxy, &stats.CloseProxyPayload{
|
||||
Name: pxy.GetName(),
|
||||
ProxyType: pxy.GetConf().GetBaseInfo().ProxyType,
|
||||
})
|
||||
metrics.Server.CloseProxy(pxy.GetName(), pxy.GetConf().GetBaseInfo().ProxyType)
|
||||
}
|
||||
|
||||
ctl.allShutdown.Done()
|
||||
xl.Info("client exit success")
|
||||
|
||||
ctl.statsCollector.Mark(stats.TypeCloseClient, &stats.CloseClientPayload{})
|
||||
metrics.Server.CloseClient()
|
||||
}
|
||||
|
||||
// block until Control closed
|
||||
@ -451,10 +442,7 @@ func (ctl *Control) manager() {
|
||||
} else {
|
||||
resp.RemoteAddr = remoteAddr
|
||||
xl.Info("new proxy [%s] success", m.ProxyName)
|
||||
ctl.statsCollector.Mark(stats.TypeNewProxy, &stats.NewProxyPayload{
|
||||
Name: m.ProxyName,
|
||||
ProxyType: m.ProxyType,
|
||||
})
|
||||
metrics.Server.NewProxy(m.ProxyName, m.ProxyType)
|
||||
}
|
||||
ctl.sendCh <- resp
|
||||
case *msg.CloseProxy:
|
||||
@ -486,7 +474,7 @@ func (ctl *Control) RegisterProxy(pxyMsg *msg.NewProxy) (remoteAddr string, err
|
||||
|
||||
// NewProxy will return a interface Proxy.
|
||||
// In fact it create different proxies by different proxy type, we just call run() here.
|
||||
pxy, err := proxy.NewProxy(ctl.ctx, ctl.runId, ctl.rc, ctl.statsCollector, ctl.poolCount, ctl.GetWorkConn, pxyConf, ctl.serverCfg)
|
||||
pxy, err := proxy.NewProxy(ctl.ctx, ctl.runId, ctl.rc, ctl.poolCount, ctl.GetWorkConn, pxyConf, ctl.serverCfg)
|
||||
if err != nil {
|
||||
return remoteAddr, err
|
||||
}
|
||||
@ -548,9 +536,6 @@ func (ctl *Control) CloseProxy(closeMsg *msg.CloseProxy) (err error) {
|
||||
delete(ctl.proxies, closeMsg.ProxyName)
|
||||
ctl.mu.Unlock()
|
||||
|
||||
ctl.statsCollector.Mark(stats.TypeCloseProxy, &stats.CloseProxyPayload{
|
||||
Name: pxy.GetName(),
|
||||
ProxyType: pxy.GetConf().GetBaseInfo().ProxyType,
|
||||
})
|
||||
metrics.Server.CloseProxy(pxy.GetName(), pxy.GetConf().GetBaseInfo().ProxyType)
|
||||
return
|
||||
}
|
||||
|
@@ -24,6 +24,7 @@ import (
    frpNet "github.com/fatedier/frp/utils/net"

    "github.com/gorilla/mux"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
@@ -38,6 +39,11 @@ func (svr *Service) RunDashboardServer(addr string, port int) (err error) {
    user, passwd := svr.cfg.DashboardUser, svr.cfg.DashboardPwd
    router.Use(frpNet.NewHttpAuthMiddleware(user, passwd).Middleware)

    // metrics
    if svr.cfg.EnablePrometheus {
        router.Handle("/metrics", promhttp.Handler())
    }

    // api, see dashboard_api.go
    router.HandleFunc("/api/serverinfo", svr.ApiServerInfo).Methods("GET")
    router.HandleFunc("/api/proxy/{type}", svr.ApiProxyByType).Methods("GET")
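Because the handler is registered after router.Use, /metrics sits behind the same basic-auth middleware as the JSON API, so a manual scrape should need the dashboard credentials (values from the sample config above):

    curl -u admin:admin http://127.0.0.1:7500/metrics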
@ -20,6 +20,7 @@ import (
|
||||
|
||||
"github.com/fatedier/frp/models/config"
|
||||
"github.com/fatedier/frp/models/consts"
|
||||
"github.com/fatedier/frp/models/metrics/mem"
|
||||
"github.com/fatedier/frp/utils/log"
|
||||
"github.com/fatedier/frp/utils/version"
|
||||
|
||||
@ -62,7 +63,7 @@ func (svr *Service) ApiServerInfo(w http.ResponseWriter, r *http.Request) {
|
||||
}()
|
||||
|
||||
log.Info("Http request: [%s]", r.URL.Path)
|
||||
serverStats := svr.statsCollector.GetServer()
|
||||
serverStats := mem.StatsCollector.GetServer()
|
||||
svrResp := ServerInfoResp{
|
||||
Version: version.Full(),
|
||||
BindPort: svr.cfg.BindPort,
|
||||
@ -186,7 +187,7 @@ func (svr *Service) ApiProxyByType(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
func (svr *Service) getProxyStatsByType(proxyType string) (proxyInfos []*ProxyStatsInfo) {
|
||||
proxyStats := svr.statsCollector.GetProxiesByType(proxyType)
|
||||
proxyStats := mem.StatsCollector.GetProxiesByType(proxyType)
|
||||
proxyInfos = make([]*ProxyStatsInfo, 0, len(proxyStats))
|
||||
for _, ps := range proxyStats {
|
||||
proxyInfo := &ProxyStatsInfo{}
|
||||
@ -256,7 +257,7 @@ func (svr *Service) ApiProxyByTypeAndName(w http.ResponseWriter, r *http.Request
|
||||
|
||||
func (svr *Service) getProxyStatsByTypeAndName(proxyType string, proxyName string) (proxyInfo GetProxyStatsResp, code int, msg string) {
|
||||
proxyInfo.Name = proxyName
|
||||
ps := svr.statsCollector.GetProxiesByTypeAndName(proxyType, proxyName)
|
||||
ps := mem.StatsCollector.GetProxiesByTypeAndName(proxyType, proxyName)
|
||||
if ps == nil {
|
||||
code = 404
|
||||
msg = "no proxy info found"
|
||||
@ -314,7 +315,7 @@ func (svr *Service) ApiProxyTraffic(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
trafficResp := GetProxyTrafficResp{}
|
||||
trafficResp.Name = name
|
||||
proxyTrafficInfo := svr.statsCollector.GetProxyTraffic(name)
|
||||
proxyTrafficInfo := mem.StatsCollector.GetProxyTraffic(name)
|
||||
|
||||
if proxyTrafficInfo == nil {
|
||||
res.Code = 404
|
||||
|
server/metrics/metrics.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package metrics

import (
    "sync"
)

type ServerMetrics interface {
    NewClient()
    CloseClient()
    NewProxy(name string, proxyType string)
    CloseProxy(name string, proxyType string)
    OpenConnection(name string, proxyType string)
    CloseConnection(name string, proxyType string)
    AddTrafficIn(name string, proxyType string, trafficBytes int64)
    AddTrafficOut(name string, proxyType string, trafficBytes int64)
}

var Server ServerMetrics = noopServerMetrics{}

var registerMetrics sync.Once

func Register(m ServerMetrics) {
    registerMetrics.Do(func() {
        Server = m
    })
}

type noopServerMetrics struct{}

func (noopServerMetrics) NewClient()                                                      {}
func (noopServerMetrics) CloseClient()                                                    {}
func (noopServerMetrics) NewProxy(name string, proxyType string)                          {}
func (noopServerMetrics) CloseProxy(name string, proxyType string)                        {}
func (noopServerMetrics) OpenConnection(name string, proxyType string)                    {}
func (noopServerMetrics) CloseConnection(name string, proxyType string)                   {}
func (noopServerMetrics) AddTrafficIn(name string, proxyType string, trafficBytes int64)  {}
func (noopServerMetrics) AddTrafficOut(name string, proxyType string, trafficBytes int64) {}
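Any type satisfying ServerMetrics can be handed to Register; in this commit that is the aggregate fan-out collector, and the no-op default keeps call sites safe when nothing is registered. For illustration only, a hypothetical logging backend would look like this (the logMetrics name and stdlib log calls are not part of the change):

    type logMetrics struct{}

    func (logMetrics) NewClient()                               { log.Println("client connected") }
    func (logMetrics) CloseClient()                             { log.Println("client closed") }
    func (logMetrics) NewProxy(name string, proxyType string)   { log.Printf("proxy up: %s/%s", name, proxyType) }
    func (logMetrics) CloseProxy(name string, proxyType string) { log.Printf("proxy down: %s/%s", name, proxyType) }
    func (logMetrics) OpenConnection(name string, proxyType string)                    {}
    func (logMetrics) CloseConnection(name string, proxyType string)                   {}
    func (logMetrics) AddTrafficIn(name string, proxyType string, trafficBytes int64)  {}
    func (logMetrics) AddTrafficOut(name string, proxyType string, trafficBytes int64) {}

    // metrics.Register(logMetrics{}) would install it, but note the sync.Once above:
    // only the first Register call takes effect, and the aggregate package already
    // registers its collector in init().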
@ -20,7 +20,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/fatedier/frp/models/config"
|
||||
"github.com/fatedier/frp/server/stats"
|
||||
"github.com/fatedier/frp/server/metrics"
|
||||
frpNet "github.com/fatedier/frp/utils/net"
|
||||
"github.com/fatedier/frp/utils/util"
|
||||
"github.com/fatedier/frp/utils/vhost"
|
||||
@ -159,21 +159,16 @@ func (pxy *HttpProxy) GetRealConn(remoteAddr string) (workConn net.Conn, err err
|
||||
}
|
||||
workConn = frpNet.WrapReadWriteCloserToConn(rwc, tmpConn)
|
||||
workConn = frpNet.WrapStatsConn(workConn, pxy.updateStatsAfterClosedConn)
|
||||
pxy.statsCollector.Mark(stats.TypeOpenConnection, &stats.OpenConnectionPayload{ProxyName: pxy.GetName()})
|
||||
metrics.Server.OpenConnection(pxy.GetName(), pxy.GetConf().GetBaseInfo().ProxyType)
|
||||
return
|
||||
}
|
||||
|
||||
func (pxy *HttpProxy) updateStatsAfterClosedConn(totalRead, totalWrite int64) {
|
||||
name := pxy.GetName()
|
||||
pxy.statsCollector.Mark(stats.TypeCloseProxy, &stats.CloseConnectionPayload{ProxyName: name})
|
||||
pxy.statsCollector.Mark(stats.TypeAddTrafficIn, &stats.AddTrafficInPayload{
|
||||
ProxyName: name,
|
||||
TrafficBytes: totalWrite,
|
||||
})
|
||||
pxy.statsCollector.Mark(stats.TypeAddTrafficOut, &stats.AddTrafficOutPayload{
|
||||
ProxyName: name,
|
||||
TrafficBytes: totalRead,
|
||||
})
|
||||
proxyType := pxy.GetConf().GetBaseInfo().ProxyType
|
||||
metrics.Server.CloseConnection(name, proxyType)
|
||||
metrics.Server.AddTrafficIn(name, proxyType, totalWrite)
|
||||
metrics.Server.AddTrafficOut(name, proxyType, totalRead)
|
||||
}
|
||||
|
||||
func (pxy *HttpProxy) Close() {
|
||||
|
@ -25,7 +25,7 @@ import (
|
||||
"github.com/fatedier/frp/models/config"
|
||||
"github.com/fatedier/frp/models/msg"
|
||||
"github.com/fatedier/frp/server/controller"
|
||||
"github.com/fatedier/frp/server/stats"
|
||||
"github.com/fatedier/frp/server/metrics"
|
||||
frpNet "github.com/fatedier/frp/utils/net"
|
||||
"github.com/fatedier/frp/utils/xlog"
|
||||
|
||||
@ -45,14 +45,13 @@ type Proxy interface {
|
||||
}
|
||||
|
||||
type BaseProxy struct {
|
||||
name string
|
||||
rc *controller.ResourceController
|
||||
statsCollector stats.Collector
|
||||
listeners []net.Listener
|
||||
usedPortsNum int
|
||||
poolCount int
|
||||
getWorkConnFn GetWorkConnFn
|
||||
serverCfg config.ServerCommonConf
|
||||
name string
|
||||
rc *controller.ResourceController
|
||||
listeners []net.Listener
|
||||
usedPortsNum int
|
||||
poolCount int
|
||||
getWorkConnFn GetWorkConnFn
|
||||
serverCfg config.ServerCommonConf
|
||||
|
||||
mu sync.RWMutex
|
||||
xl *xlog.Logger
|
||||
@ -136,7 +135,7 @@ func (pxy *BaseProxy) GetWorkConnFromPool(src, dst net.Addr) (workConn net.Conn,
|
||||
// startListenHandler start a goroutine handler for each listener.
|
||||
// p: p will just be passed to handler(Proxy, frpNet.Conn).
|
||||
// handler: each proxy type can set different handler function to deal with connections accepted from listeners.
|
||||
func (pxy *BaseProxy) startListenHandler(p Proxy, handler func(Proxy, net.Conn, stats.Collector, config.ServerCommonConf)) {
|
||||
func (pxy *BaseProxy) startListenHandler(p Proxy, handler func(Proxy, net.Conn, config.ServerCommonConf)) {
|
||||
xl := xlog.FromContextSafe(pxy.ctx)
|
||||
for _, listener := range pxy.listeners {
|
||||
go func(l net.Listener) {
|
||||
@ -149,26 +148,25 @@ func (pxy *BaseProxy) startListenHandler(p Proxy, handler func(Proxy, net.Conn,
|
||||
return
|
||||
}
|
||||
xl.Debug("get a user connection [%s]", c.RemoteAddr().String())
|
||||
go handler(p, c, pxy.statsCollector, pxy.serverCfg)
|
||||
go handler(p, c, pxy.serverCfg)
|
||||
}
|
||||
}(listener)
|
||||
}
|
||||
}
|
||||
|
||||
func NewProxy(ctx context.Context, runId string, rc *controller.ResourceController, statsCollector stats.Collector, poolCount int,
|
||||
func NewProxy(ctx context.Context, runId string, rc *controller.ResourceController, poolCount int,
|
||||
getWorkConnFn GetWorkConnFn, pxyConf config.ProxyConf, serverCfg config.ServerCommonConf) (pxy Proxy, err error) {
|
||||
|
||||
xl := xlog.FromContextSafe(ctx).Spawn().AppendPrefix(pxyConf.GetBaseInfo().ProxyName)
|
||||
basePxy := BaseProxy{
|
||||
name: pxyConf.GetBaseInfo().ProxyName,
|
||||
rc: rc,
|
||||
statsCollector: statsCollector,
|
||||
listeners: make([]net.Listener, 0),
|
||||
poolCount: poolCount,
|
||||
getWorkConnFn: getWorkConnFn,
|
||||
serverCfg: serverCfg,
|
||||
xl: xl,
|
||||
ctx: xlog.NewContext(ctx, xl),
|
||||
name: pxyConf.GetBaseInfo().ProxyName,
|
||||
rc: rc,
|
||||
listeners: make([]net.Listener, 0),
|
||||
poolCount: poolCount,
|
||||
getWorkConnFn: getWorkConnFn,
|
||||
serverCfg: serverCfg,
|
||||
xl: xl,
|
||||
ctx: xlog.NewContext(ctx, xl),
|
||||
}
|
||||
switch cfg := pxyConf.(type) {
|
||||
case *config.TcpProxyConf:
|
||||
@ -216,7 +214,7 @@ func NewProxy(ctx context.Context, runId string, rc *controller.ResourceControll
|
||||
|
||||
// HandleUserTcpConnection is used for incoming tcp user connections.
|
||||
// It can be used for tcp, http, https type.
|
||||
func HandleUserTcpConnection(pxy Proxy, userConn net.Conn, statsCollector stats.Collector, serverCfg config.ServerCommonConf) {
|
||||
func HandleUserTcpConnection(pxy Proxy, userConn net.Conn, serverCfg config.ServerCommonConf) {
|
||||
xl := xlog.FromContextSafe(pxy.Context())
|
||||
defer userConn.Close()
|
||||
|
||||
@ -243,17 +241,13 @@ func HandleUserTcpConnection(pxy Proxy, userConn net.Conn, statsCollector stats.
|
||||
xl.Debug("join connections, workConn(l[%s] r[%s]) userConn(l[%s] r[%s])", workConn.LocalAddr().String(),
|
||||
workConn.RemoteAddr().String(), userConn.LocalAddr().String(), userConn.RemoteAddr().String())
|
||||
|
||||
statsCollector.Mark(stats.TypeOpenConnection, &stats.OpenConnectionPayload{ProxyName: pxy.GetName()})
|
||||
name := pxy.GetName()
|
||||
proxyType := pxy.GetConf().GetBaseInfo().ProxyType
|
||||
metrics.Server.OpenConnection(name, proxyType)
|
||||
inCount, outCount := frpIo.Join(local, userConn)
|
||||
statsCollector.Mark(stats.TypeCloseConnection, &stats.CloseConnectionPayload{ProxyName: pxy.GetName()})
|
||||
statsCollector.Mark(stats.TypeAddTrafficIn, &stats.AddTrafficInPayload{
|
||||
ProxyName: pxy.GetName(),
|
||||
TrafficBytes: inCount,
|
||||
})
|
||||
statsCollector.Mark(stats.TypeAddTrafficOut, &stats.AddTrafficOutPayload{
|
||||
ProxyName: pxy.GetName(),
|
||||
TrafficBytes: outCount,
|
||||
})
|
||||
metrics.Server.CloseConnection(name, proxyType)
|
||||
metrics.Server.AddTrafficIn(name, proxyType, inCount)
|
||||
metrics.Server.AddTrafficOut(name, proxyType, outCount)
|
||||
xl.Debug("join connections closed")
|
||||
}
|
||||
|
||||
|
@ -23,7 +23,7 @@ import (
|
||||
"github.com/fatedier/frp/models/config"
|
||||
"github.com/fatedier/frp/models/msg"
|
||||
"github.com/fatedier/frp/models/proto/udp"
|
||||
"github.com/fatedier/frp/server/stats"
|
||||
"github.com/fatedier/frp/server/metrics"
|
||||
|
||||
"github.com/fatedier/golib/errors"
|
||||
)
|
||||
@ -114,10 +114,11 @@ func (pxy *UdpProxy) Run() (remoteAddr string, err error) {
|
||||
if errRet := errors.PanicToError(func() {
|
||||
xl.Trace("get udp message from workConn: %s", m.Content)
|
||||
pxy.readCh <- m
|
||||
pxy.statsCollector.Mark(stats.TypeAddTrafficOut, &stats.AddTrafficOutPayload{
|
||||
ProxyName: pxy.GetName(),
|
||||
TrafficBytes: int64(len(m.Content)),
|
||||
})
|
||||
metrics.Server.AddTrafficOut(
|
||||
pxy.GetName(),
|
||||
pxy.GetConf().GetBaseInfo().ProxyType,
|
||||
int64(len(m.Content)),
|
||||
)
|
||||
}); errRet != nil {
|
||||
conn.Close()
|
||||
xl.Info("reader goroutine for udp work connection closed")
|
||||
@ -143,10 +144,11 @@ func (pxy *UdpProxy) Run() (remoteAddr string, err error) {
|
||||
return
|
||||
} else {
|
||||
xl.Trace("send message to udp workConn: %s", udpMsg.Content)
|
||||
pxy.statsCollector.Mark(stats.TypeAddTrafficIn, &stats.AddTrafficInPayload{
|
||||
ProxyName: pxy.GetName(),
|
||||
TrafficBytes: int64(len(udpMsg.Content)),
|
||||
})
|
||||
metrics.Server.AddTrafficIn(
|
||||
pxy.GetName(),
|
||||
pxy.GetConf().GetBaseInfo().ProxyType,
|
||||
int64(len(udpMsg.Content)),
|
||||
)
|
||||
continue
|
||||
}
|
||||
case <-ctx.Done():
|
||||
|
@ -32,14 +32,15 @@ import (
|
||||
"github.com/fatedier/frp/assets"
|
||||
"github.com/fatedier/frp/models/auth"
|
||||
"github.com/fatedier/frp/models/config"
|
||||
modelmetrics "github.com/fatedier/frp/models/metrics"
|
||||
"github.com/fatedier/frp/models/msg"
|
||||
"github.com/fatedier/frp/models/nathole"
|
||||
plugin "github.com/fatedier/frp/models/plugin/server"
|
||||
"github.com/fatedier/frp/server/controller"
|
||||
"github.com/fatedier/frp/server/group"
|
||||
"github.com/fatedier/frp/server/metrics"
|
||||
"github.com/fatedier/frp/server/ports"
|
||||
"github.com/fatedier/frp/server/proxy"
|
||||
"github.com/fatedier/frp/server/stats"
|
||||
"github.com/fatedier/frp/utils/log"
|
||||
frpNet "github.com/fatedier/frp/utils/net"
|
||||
"github.com/fatedier/frp/utils/tcpmux"
|
||||
@ -92,9 +93,6 @@ type Service struct {
|
||||
// Verifies authentication based on selected method
|
||||
authVerifier auth.Verifier
|
||||
|
||||
// stats collector to store server and proxies stats info
|
||||
statsCollector stats.Collector
|
||||
|
||||
tlsConfig *tls.Config
|
||||
|
||||
cfg config.ServerCommonConf
|
||||
@ -275,8 +273,12 @@ func NewService(cfg config.ServerCommonConf) (svr *Service, err error) {
|
||||
log.Info("Dashboard listen on %s:%d", cfg.DashboardAddr, cfg.DashboardPort)
|
||||
statsEnable = true
|
||||
}
|
||||
|
||||
svr.statsCollector = stats.NewInternalCollector(statsEnable)
|
||||
if statsEnable {
|
||||
modelmetrics.EnableMem()
|
||||
if cfg.EnablePrometheus {
|
||||
modelmetrics.EnablePrometheus()
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@ -429,8 +431,7 @@ func (svr *Service) RegisterControl(ctlConn net.Conn, loginMsg *msg.Login) (err
        return
    }

    ctl := NewControl(ctx, svr.rc, svr.pxyManager, svr.pluginManager, svr.statsCollector, svr.authVerifier, ctlConn, loginMsg, svr.cfg)
    ctl := NewControl(ctx, svr.rc, svr.pxyManager, svr.pluginManager, svr.authVerifier, ctlConn, loginMsg, svr.cfg)
    if oldCtl := svr.ctlManager.Add(loginMsg.RunId, ctl); oldCtl != nil {
        oldCtl.allShutdown.WaitDone()
    }
@ -438,7 +439,7 @@ func (svr *Service) RegisterControl(ctlConn net.Conn, loginMsg *msg.Login) (err
    ctl.Start()

    // for statistics
    svr.statsCollector.Mark(stats.TypeNewClient, &stats.NewClientPayload{})
    metrics.Server.NewClient()

    go func() {
        // block until control closed
@ -1,277 +0,0 @@
// Copyright 2019 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package stats

import (
    "sync"
    "time"

    "github.com/fatedier/frp/utils/log"
    "github.com/fatedier/frp/utils/metric"
)

type internalCollector struct {
    enable bool
    info   *ServerStatistics
    mu     sync.Mutex
}

func NewInternalCollector(enable bool) Collector {
    return &internalCollector{
        enable: enable,
        info: &ServerStatistics{
            TotalTrafficIn:  metric.NewDateCounter(ReserveDays),
            TotalTrafficOut: metric.NewDateCounter(ReserveDays),
            CurConns:        metric.NewCounter(),

            ClientCounts:    metric.NewCounter(),
            ProxyTypeCounts: make(map[string]metric.Counter),

            ProxyStatistics: make(map[string]*ProxyStatistics),
        },
    }
}

func (collector *internalCollector) Run() error {
    go func() {
        for {
            time.Sleep(12 * time.Hour)
            log.Debug("start to clear useless proxy statistics data...")
            collector.ClearUselessInfo()
            log.Debug("finish to clear useless proxy statistics data")
        }
    }()
    return nil
}

func (collector *internalCollector) ClearUselessInfo() {
    // To check if there are proxies that closed than 7 days and drop them.
    collector.mu.Lock()
    defer collector.mu.Unlock()
    for name, data := range collector.info.ProxyStatistics {
        if !data.LastCloseTime.IsZero() && time.Since(data.LastCloseTime) > time.Duration(7*24)*time.Hour {
            delete(collector.info.ProxyStatistics, name)
            log.Trace("clear proxy [%s]'s statistics data, lastCloseTime: [%s]", name, data.LastCloseTime.String())
        }
    }
}

func (collector *internalCollector) Mark(statsType StatsType, payload interface{}) {
    if !collector.enable {
        return
    }

    switch v := payload.(type) {
    case *NewClientPayload:
        collector.newClient(v)
    case *CloseClientPayload:
        collector.closeClient(v)
    case *NewProxyPayload:
        collector.newProxy(v)
    case *CloseProxyPayload:
        collector.closeProxy(v)
    case *OpenConnectionPayload:
        collector.openConnection(v)
    case *CloseConnectionPayload:
        collector.closeConnection(v)
    case *AddTrafficInPayload:
        collector.addTrafficIn(v)
    case *AddTrafficOutPayload:
        collector.addTrafficOut(v)
    }
}

func (collector *internalCollector) newClient(payload *NewClientPayload) {
    collector.info.ClientCounts.Inc(1)
}

func (collector *internalCollector) closeClient(payload *CloseClientPayload) {
    collector.info.ClientCounts.Dec(1)
}

func (collector *internalCollector) newProxy(payload *NewProxyPayload) {
    collector.mu.Lock()
    defer collector.mu.Unlock()
    counter, ok := collector.info.ProxyTypeCounts[payload.ProxyType]
    if !ok {
        counter = metric.NewCounter()
    }
    counter.Inc(1)
    collector.info.ProxyTypeCounts[payload.ProxyType] = counter

    proxyStats, ok := collector.info.ProxyStatistics[payload.Name]
    if !(ok && proxyStats.ProxyType == payload.ProxyType) {
        proxyStats = &ProxyStatistics{
            Name:       payload.Name,
            ProxyType:  payload.ProxyType,
            CurConns:   metric.NewCounter(),
            TrafficIn:  metric.NewDateCounter(ReserveDays),
            TrafficOut: metric.NewDateCounter(ReserveDays),
        }
        collector.info.ProxyStatistics[payload.Name] = proxyStats
    }
    proxyStats.LastStartTime = time.Now()
}

func (collector *internalCollector) closeProxy(payload *CloseProxyPayload) {
    collector.mu.Lock()
    defer collector.mu.Unlock()
    if counter, ok := collector.info.ProxyTypeCounts[payload.ProxyType]; ok {
        counter.Dec(1)
    }
    if proxyStats, ok := collector.info.ProxyStatistics[payload.Name]; ok {
        proxyStats.LastCloseTime = time.Now()
    }
}

func (collector *internalCollector) openConnection(payload *OpenConnectionPayload) {
    collector.info.CurConns.Inc(1)

    collector.mu.Lock()
    defer collector.mu.Unlock()
    proxyStats, ok := collector.info.ProxyStatistics[payload.ProxyName]
    if ok {
        proxyStats.CurConns.Inc(1)
        collector.info.ProxyStatistics[payload.ProxyName] = proxyStats
    }
}

func (collector *internalCollector) closeConnection(payload *CloseConnectionPayload) {
    collector.info.CurConns.Dec(1)

    collector.mu.Lock()
    defer collector.mu.Unlock()
    proxyStats, ok := collector.info.ProxyStatistics[payload.ProxyName]
    if ok {
        proxyStats.CurConns.Dec(1)
        collector.info.ProxyStatistics[payload.ProxyName] = proxyStats
    }
}

func (collector *internalCollector) addTrafficIn(payload *AddTrafficInPayload) {
    collector.info.TotalTrafficIn.Inc(payload.TrafficBytes)

    collector.mu.Lock()
    defer collector.mu.Unlock()

    proxyStats, ok := collector.info.ProxyStatistics[payload.ProxyName]
    if ok {
        proxyStats.TrafficIn.Inc(payload.TrafficBytes)
        collector.info.ProxyStatistics[payload.ProxyName] = proxyStats
    }
}

func (collector *internalCollector) addTrafficOut(payload *AddTrafficOutPayload) {
    collector.info.TotalTrafficOut.Inc(payload.TrafficBytes)

    collector.mu.Lock()
    defer collector.mu.Unlock()

    proxyStats, ok := collector.info.ProxyStatistics[payload.ProxyName]
    if ok {
        proxyStats.TrafficOut.Inc(payload.TrafficBytes)
        collector.info.ProxyStatistics[payload.ProxyName] = proxyStats
    }
}

func (collector *internalCollector) GetServer() *ServerStats {
    collector.mu.Lock()
    defer collector.mu.Unlock()
    s := &ServerStats{
        TotalTrafficIn:  collector.info.TotalTrafficIn.TodayCount(),
        TotalTrafficOut: collector.info.TotalTrafficOut.TodayCount(),
        CurConns:        collector.info.CurConns.Count(),
        ClientCounts:    collector.info.ClientCounts.Count(),
        ProxyTypeCounts: make(map[string]int64),
    }
    for k, v := range collector.info.ProxyTypeCounts {
        s.ProxyTypeCounts[k] = v.Count()
    }
    return s
}

func (collector *internalCollector) GetProxiesByType(proxyType string) []*ProxyStats {
    res := make([]*ProxyStats, 0)
    collector.mu.Lock()
    defer collector.mu.Unlock()

    for name, proxyStats := range collector.info.ProxyStatistics {
        if proxyStats.ProxyType != proxyType {
            continue
        }

        ps := &ProxyStats{
            Name:            name,
            Type:            proxyStats.ProxyType,
            TodayTrafficIn:  proxyStats.TrafficIn.TodayCount(),
            TodayTrafficOut: proxyStats.TrafficOut.TodayCount(),
            CurConns:        proxyStats.CurConns.Count(),
        }
        if !proxyStats.LastStartTime.IsZero() {
            ps.LastStartTime = proxyStats.LastStartTime.Format("01-02 15:04:05")
        }
        if !proxyStats.LastCloseTime.IsZero() {
            ps.LastCloseTime = proxyStats.LastCloseTime.Format("01-02 15:04:05")
        }
        res = append(res, ps)
    }
    return res
}

func (collector *internalCollector) GetProxiesByTypeAndName(proxyType string, proxyName string) (res *ProxyStats) {
    collector.mu.Lock()
    defer collector.mu.Unlock()

    for name, proxyStats := range collector.info.ProxyStatistics {
        if proxyStats.ProxyType != proxyType {
            continue
        }

        if name != proxyName {
            continue
        }

        res = &ProxyStats{
            Name:            name,
            Type:            proxyStats.ProxyType,
            TodayTrafficIn:  proxyStats.TrafficIn.TodayCount(),
            TodayTrafficOut: proxyStats.TrafficOut.TodayCount(),
            CurConns:        proxyStats.CurConns.Count(),
        }
        if !proxyStats.LastStartTime.IsZero() {
            res.LastStartTime = proxyStats.LastStartTime.Format("01-02 15:04:05")
        }
        if !proxyStats.LastCloseTime.IsZero() {
            res.LastCloseTime = proxyStats.LastCloseTime.Format("01-02 15:04:05")
        }
        break
    }
    return
}

func (collector *internalCollector) GetProxyTraffic(name string) (res *ProxyTrafficInfo) {
    collector.mu.Lock()
    defer collector.mu.Unlock()

    proxyStats, ok := collector.info.ProxyStatistics[name]
    if ok {
        res = &ProxyTrafficInfo{
            Name: name,
        }
        res.TrafficIn = proxyStats.TrafficIn.GetLastDaysCount(ReserveDays)
        res.TrafficOut = proxyStats.TrafficOut.GetLastDaysCount(ReserveDays)
    }
    return
}
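The deleted collector keeps per-day buckets through `metric.NewDateCounter(ReserveDays)` and reads them back with `TodayCount` / `GetLastDaysCount`. The sketch below illustrates that date-bucketed counter idea in a self-contained form; the type, method names and bucket handling are illustrative, not the `utils/metric` implementation.

```go
package main

import (
	"fmt"
	"time"
)

// dateCounter keeps one bucket per calendar day, up to reserveDays days.
type dateCounter struct {
	reserveDays int
	today       time.Time
	buckets     []int64 // buckets[0] is today, buckets[1] is yesterday, ...
}

func newDateCounter(reserveDays int) *dateCounter {
	return &dateCounter{
		reserveDays: reserveDays,
		today:       time.Now().Truncate(24 * time.Hour),
		buckets:     make([]int64, reserveDays),
	}
}

// rotate shifts the buckets forward when the calendar day changes.
func (d *dateCounter) rotate() {
	now := time.Now().Truncate(24 * time.Hour)
	for d.today.Before(now) {
		d.buckets = append([]int64{0}, d.buckets[:d.reserveDays-1]...)
		d.today = d.today.Add(24 * time.Hour)
	}
}

func (d *dateCounter) Inc(n int64)       { d.rotate(); d.buckets[0] += n }
func (d *dateCounter) TodayCount() int64 { d.rotate(); return d.buckets[0] }

// LastDays returns a copy of the most recent n daily buckets.
func (d *dateCounter) LastDays(n int) []int64 {
	d.rotate()
	if n > d.reserveDays {
		n = d.reserveDays
	}
	return append([]int64(nil), d.buckets[:n]...)
}

func main() {
	c := newDateCounter(7)
	c.Inc(1024)
	fmt.Println(c.TodayCount(), c.LastDays(7))
}
```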
@ -1,4 +1,4 @@
// Copyright 2018 fatedier, fatedier@gmail.com
// Copyright 2020 fatedier, fatedier@gmail.com
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,19 +12,23 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package errors
package metric

import (
    "fmt"
)

func PanicToError(fn func()) (err error) {
    defer func() {
        if r := recover(); r != nil {
            err = fmt.Errorf("Panic error: %v", r)
        }
    }()

    fn()
    return

// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
    Inc()
    Dec()
    Set(float64)
}

// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
    Inc()
}

// HistogramMetric counts individual observations.
type HistogramMetric interface {
    Observe(float64)
}
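These small interfaces are shaped so that `client_golang`'s `prometheus.Gauge`, `prometheus.Counter` and `prometheus.Histogram` satisfy them directly, which keeps the metrics code decoupled from any one backend. A brief sketch (metric names and help strings are illustrative):

```go
package main

import "github.com/prometheus/client_golang/prometheus"

// Local copies of the interfaces from this diff.
type GaugeMetric interface {
	Inc()
	Dec()
	Set(float64)
}

type CounterMetric interface {
	Inc()
}

type HistogramMetric interface {
	Observe(float64)
}

func main() {
	// prometheus.NewGauge/NewCounter/NewHistogram return concrete types whose
	// method sets cover the interfaces above, so they can be assigned directly.
	var g GaugeMetric = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "frp_example_clients", Help: "illustrative gauge",
	})
	var c CounterMetric = prometheus.NewCounter(prometheus.CounterOpts{
		Name: "frp_example_traffic_in_bytes_total", Help: "illustrative counter",
	})
	var h HistogramMetric = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "frp_example_duration_seconds", Help: "illustrative histogram",
	})

	g.Set(3)
	g.Dec()
	c.Inc()
	h.Observe(0.42)
	// A real exporter would also register the concrete collectors with a
	// prometheus.Registerer before serving /metrics.
}
```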
@ -174,7 +174,6 @@ func (rp *HttpReverseProxy) getVhost(domain string, location string) (vr *VhostR
        }
        domainSplit = domainSplit[1:]
    }
    return
}

func (rp *HttpReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
@ -116,7 +116,6 @@ func (v *VhostMuxer) getListener(name, path string) (l *Listener, exist bool) {
        }
        domainSplit = domainSplit[1:]
    }
    return
}

func (v *VhostMuxer) run() {
Deleted vendored files (the vendor/ directory is removed by this commit; generated vendored contents omitted):

22   vendor/github.com/armon/go-socks5/.gitignore           @ -1,22 +0,0
4    vendor/github.com/armon/go-socks5/.travis.yml          @ -1,4 +0,0
20   vendor/github.com/armon/go-socks5/LICENSE              @ -1,20 +0,0
45   vendor/github.com/armon/go-socks5/README.md            @ -1,45 +0,0
151  vendor/github.com/armon/go-socks5/auth.go              @ -1,151 +0,0
17   vendor/github.com/armon/go-socks5/credentials.go       @ -1,17 +0,0
364  vendor/github.com/armon/go-socks5/request.go           @ -1,364 +0,0
23   vendor/github.com/armon/go-socks5/resolver.go          @ -1,23 +0,0
41   vendor/github.com/armon/go-socks5/ruleset.go           @ -1,41 +0,0
169  vendor/github.com/armon/go-socks5/socks5.go            @ -1,169 +0,0
15   vendor/github.com/davecgh/go-spew/LICENSE              @ -1,15 +0,0
152  vendor/github.com/davecgh/go-spew/spew/bypass.go       @ -1,152 +0,0
38   vendor/github.com/davecgh/go-spew/spew/bypasssafe.go   @ -1,38 +0,0
341  vendor/github.com/davecgh/go-spew/spew/common.go       @ -1,341 +0,0
|
||||
// value. It is used by valueSorter.Less as part of the sort.Interface
|
||||
// implementation.
|
||||
func valueSortLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.String:
|
||||
return a.String() < b.String()
|
||||
case reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Array:
|
||||
// Compare the contents of both arrays.
|
||||
l := a.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
av := a.Index(i)
|
||||
bv := b.Index(i)
|
||||
if av.Interface() == bv.Interface() {
|
||||
continue
|
||||
}
|
||||
return valueSortLess(av, bv)
|
||||
}
|
||||
}
|
||||
return a.String() < b.String()
|
||||
}
|
||||
|
||||
// Less returns whether the value at index i should sort before the
|
||||
// value at index j. It is part of the sort.Interface implementation.
|
||||
func (s *valuesSorter) Less(i, j int) bool {
|
||||
if s.strings == nil {
|
||||
return valueSortLess(s.values[i], s.values[j])
|
||||
}
|
||||
return s.strings[i] < s.strings[j]
|
||||
}
|
||||
|
||||
// sortValues is a sort function that handles both native types and any type that
|
||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||
// their Value.String() value to ensure display stability.
|
||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Sort(newValuesSorter(values, cs))
|
||||
}
|
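A minimal usage sketch of the sorting path above, assuming only the go-spew API documented in these vendored files: when SortKeys is enabled, newValuesSorter builds surrogate strings through handleMethods, so map keys that implement fmt.Stringer are ordered by their String output (the color key type below is hypothetical).

package main

import (
    "fmt"

    "github.com/davecgh/go-spew/spew"
)

// color is a hypothetical key type; its value-receiver String method is
// what handleMethods would invoke when building surrogate sort keys.
type color int

func (c color) String() string { return fmt.Sprintf("color-%d", int(c)) }

func main() {
    cfg := spew.ConfigState{Indent: " ", SortKeys: true}
    m := map[color]string{2: "green", 1: "red", 3: "blue"}
    // Keys are rendered via String and sorted by that text, so the dump
    // is deterministic across runs.
    fmt.Print(cfg.Sdump(m))
}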
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
@ -1,306 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ConfigState houses the configuration options used by spew to format and
|
||||
// display values. There is a global instance, Config, that is used to control
|
||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
||||
// provides methods equivalent to the top-level functions.
|
||||
//
|
||||
// The zero value for ConfigState provides no indentation. You would typically
|
||||
// want to set it to a space or a tab.
|
||||
//
|
||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
||||
// with default settings. See the documentation of NewDefaultConfig for default
|
||||
// values.
|
||||
type ConfigState struct {
|
||||
// Indent specifies the string to use for each indentation level. The
|
||||
// global config instance that all top-level functions use set this to a
|
||||
// single space by default. If you would like more indentation, you might
|
||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
||||
Indent string
|
||||
|
||||
// MaxDepth controls the maximum number of levels to descend into nested
|
||||
// data structures. The default, 0, means there is no limit.
|
||||
//
|
||||
// NOTE: Circular data structures are properly detected, so it is not
|
||||
// necessary to set this value unless you specifically want to limit deeply
|
||||
// nested data structures.
|
||||
MaxDepth int
|
||||
|
||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
||||
// invoked for types that implement them.
|
||||
DisableMethods bool
|
||||
|
||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
||||
// error and Stringer interfaces on types which only accept a pointer
|
||||
// receiver when the current type is not a pointer.
|
||||
//
|
||||
// NOTE: This might be an unsafe action since calling one of these methods
|
||||
// with a pointer receiver could technically mutate the value, however,
|
||||
// in practice, types which choose to satisfy an error or Stringer
|
||||
// interface with a pointer receiver should not be mutating their state
|
||||
// inside these interface methods. As a result, this option relies on
|
||||
// access to the unsafe package, so it will not have any effect when
|
||||
// running in environments without access to the unsafe package such as
|
||||
// Google App Engine or with the "safe" build tag specified.
|
||||
DisablePointerMethods bool
|
||||
|
||||
// DisablePointerAddresses specifies whether to disable the printing of
|
||||
// pointer addresses. This is useful when diffing data structures in tests.
|
||||
DisablePointerAddresses bool
|
||||
|
||||
// DisableCapacities specifies whether to disable the printing of capacities
|
||||
// for arrays, slices, maps and channels. This is useful when diffing
|
||||
// data structures in tests.
|
||||
DisableCapacities bool
|
||||
|
||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||
// a custom error or Stringer interface is invoked. The default, false,
|
||||
// means it will print the results of invoking the custom error or Stringer
|
||||
// interface and return immediately instead of continuing to recurse into
|
||||
// the internals of the data type.
|
||||
//
|
||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
||||
// via the DisableMethods or DisablePointerMethods options.
|
||||
ContinueOnMethod bool
|
||||
|
||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
||||
// this to have a more deterministic, diffable output. Note that only
|
||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
||||
// that support the error or Stringer interfaces (if methods are
|
||||
// enabled) are supported, with other types sorted according to the
|
||||
// reflect.Value.String() output which guarantees display stability.
|
||||
SortKeys bool
|
||||
|
||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
||||
// be spewed to strings and sorted by those strings. This is only
|
||||
// considered if SortKeys is true.
|
||||
SpewKeys bool
|
||||
}
|
||||
|
||||
// Config is the active configuration of the top-level functions.
|
||||
// The configuration can be changed by modifying the contents of spew.Config.
|
||||
var Config = ConfigState{Indent: " "}
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the formatted string as a value that satisfies error. See NewFormatter
|
||||
// for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
c.Printf, c.Println, or c.Fprintf.
|
||||
*/
|
||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(c, v)
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(c, w, a...)
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by modifying the public members
|
||||
of c. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func (c *ConfigState) Dump(a ...interface{}) {
|
||||
fdump(c, os.Stdout, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(c, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a spew Formatter interface using
|
||||
// the ConfigState associated with s.
|
||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = newFormatter(c, arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
||||
|
||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||
//
|
||||
// Indent: " "
|
||||
// MaxDepth: 0
|
||||
// DisableMethods: false
|
||||
// DisablePointerMethods: false
|
||||
// ContinueOnMethod: false
|
||||
// SortKeys: false
|
||||
func NewDefaultConfig() *ConfigState {
|
||||
return &ConfigState{Indent: " "}
|
||||
}
|
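As a rough usage sketch of the ConfigState options documented in the removed config.go above (nothing beyond that API is assumed), a test-oriented configuration typically disables pointer addresses and capacities and sorts keys so two dumps of equal values compare equal:

package main

import (
    "fmt"

    "github.com/davecgh/go-spew/spew"
)

func main() {
    cfg := spew.ConfigState{
        Indent:                  "\t",
        SortKeys:                true, // deterministic map ordering
        DisablePointerAddresses: true, // addresses differ between runs
        DisableCapacities:       true, // capacities are allocation noise
    }
    got := map[string]int{"b": 2, "a": 1}
    want := map[string]int{"a": 1, "b": 2}
    // Equal values produce byte-identical dumps under this configuration.
    fmt.Println(cfg.Sdump(got) == cfg.Sdump(want))
}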
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
@ -1,211 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging.
|
||||
|
||||
A quick overview of the additional features spew provides over the built-in
|
||||
printing facilities for Go data types is as follows:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output (only when using
|
||||
Dump style)
|
||||
|
||||
There are two different approaches spew allows for dumping Go data structures:
|
||||
|
||||
* Dump style which prints with newlines, customizable indentation,
|
||||
and additional debug information such as types and all pointer addresses
|
||||
used to indirect to the final value
|
||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||
similar to the default %v while providing the additional functionality
|
||||
outlined above and passing unsupported format verbs such as %x and %q
|
||||
along to fmt
|
||||
|
||||
Quick Start
|
||||
|
||||
This section demonstrates how to quickly get started with spew. See the
|
||||
sections below for further details on formatting and configuration options.
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||
%#+v (adds types and pointer addresses):
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available
|
||||
via the spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
|
||||
The following configuration options are available:
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* DisablePointerAddresses
|
||||
DisablePointerAddresses specifies whether to disable the printing of
|
||||
pointer addresses. This is useful when diffing data structures in tests.
|
||||
|
||||
* DisableCapacities
|
||||
DisableCapacities specifies whether to disable the printing of
|
||||
capacities for arrays, slices, maps and channels. This is useful when
|
||||
diffing data structures in tests.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are
|
||||
supported with other types sorted according to the
|
||||
reflect.Value.String() output which guarantees display
|
||||
stability. Natural map order is used by default.
|
||||
|
||||
* SpewKeys
|
||||
Specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only
|
||||
considered if SortKeys is true.
|
||||
|
||||
Dump Usage
|
||||
|
||||
Simply call spew.Dump with a list of variables you want to dump:
|
||||
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
|
||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||
io.Writer. For example, to dump to standard error:
|
||||
|
||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||
|
||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Sample Dump Output
|
||||
|
||||
See the Dump example for details on the setup of the types and variables being
|
||||
shown here.
|
||||
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
(string) (len=3) "one": (bool) true
|
||||
}
|
||||
}
|
||||
|
||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||
command as shown.
|
||||
([]uint8) (len=32 cap=32) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
|
||||
Custom Formatter
|
||||
|
||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||
so that it integrates cleanly with standard fmt package printing functions. The
|
||||
formatter is useful for inline printing of smaller data types similar to the
|
||||
standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Custom Formatter Usage
|
||||
|
||||
The simplest way to make use of the spew custom formatter is to call one of the
|
||||
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
|
||||
functions have syntax you are most likely already familiar with:
|
||||
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Println(myVar, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
See the Index for the full list of convenience functions.
|
||||
|
||||
Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
|
||||
See the Printf example for details on the setup of variables being shown
|
||||
here.
|
||||
|
||||
Errors
|
||||
|
||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||
detects them and handles them internally by printing the panic information
|
||||
inline with the output. Since spew is intended to provide deep pretty printing
|
||||
capabilities on structures, it intentionally does not return any errors.
|
||||
*/
|
||||
package spew
|
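The Errors paragraph in the removed doc.go above is the behaviour that most often surprises new users; a minimal sketch of what it describes, assuming only the exported Dump function (the badStringer type is hypothetical):

package main

import "github.com/davecgh/go-spew/spew"

// badStringer panics inside String; per the Errors section above, spew
// recovers the panic and prints its text inline instead of crashing.
type badStringer struct{}

func (badStringer) String() string { panic("boom") }

func main() {
    spew.Dump(badStringer{}) // does not panic; the dump notes the recovered panic
}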
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
@ -1,509 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||
// convert cgo types to uint8 slices for hexdumping.
|
||||
uint8Type = reflect.TypeOf(uint8(0))
|
||||
|
||||
// cCharRE is a regular expression that matches a cgo char.
|
||||
// It is used to detect character arrays to hexdump them.
|
||||
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
|
||||
|
||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||
// char. It is used to detect unsigned character arrays to hexdump
|
||||
// them.
|
||||
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
|
||||
|
||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||
// It is used to detect uint8_t arrays to hexdump them.
|
||||
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
|
||||
)
|
||||
|
||||
// dumpState contains information about the state of a dump operation.
|
||||
type dumpState struct {
|
||||
w io.Writer
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
ignoreNextIndent bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// indent performs indentation according to the depth level and cs.Indent
|
||||
// option.
|
||||
func (d *dumpState) indent() {
|
||||
if d.ignoreNextIndent {
|
||||
d.ignoreNextIndent = false
|
||||
return
|
||||
}
|
||||
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range d.pointers {
|
||||
if depth >= d.depth {
|
||||
delete(d.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
d.pointers[addr] = d.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type information.
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
d.w.Write([]byte(ve.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
|
||||
// Display pointer information.
|
||||
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
d.w.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(d.w, addr)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
d.w.Write(openParenBytes)
|
||||
switch {
|
||||
case nilFound == true:
|
||||
d.w.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound == true:
|
||||
d.w.Write(circularBytes)
|
||||
|
||||
default:
|
||||
d.ignoreNextType = true
|
||||
d.dump(ve)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||
// Determine whether this type should be hex dumped or not. Also,
|
||||
// for types which should be hexdumped, try to use the underlying data
|
||||
// first, then fall back to trying to convert them to a uint8 slice.
|
||||
var buf []uint8
|
||||
doConvert := false
|
||||
doHexDump := false
|
||||
numEntries := v.Len()
|
||||
if numEntries > 0 {
|
||||
vt := v.Index(0).Type()
|
||||
vts := vt.String()
|
||||
switch {
|
||||
// C types that need to be converted.
|
||||
case cCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUnsignedCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUint8tCharRE.MatchString(vts):
|
||||
doConvert = true
|
||||
|
||||
// Try to use existing uint8 slices and fall back to converting
|
||||
// and copying if that fails.
|
||||
case vt.Kind() == reflect.Uint8:
|
||||
// We need an addressable interface to convert the type
|
||||
// to a byte slice. However, the reflect package won't
|
||||
// give us an interface on certain things like
|
||||
// unexported struct fields in order to enforce
|
||||
// visibility rules. We use unsafe, when available, to
|
||||
// bypass these restrictions since this package does not
|
||||
// mutate the values.
|
||||
vs := v
|
||||
if !vs.CanInterface() || !vs.CanAddr() {
|
||||
vs = unsafeReflectValue(vs)
|
||||
}
|
||||
if !UnsafeDisabled {
|
||||
vs = vs.Slice(0, numEntries)
|
||||
|
||||
// Use the existing uint8 slice if it can be
|
||||
// type asserted.
|
||||
iface := vs.Interface()
|
||||
if slice, ok := iface.([]uint8); ok {
|
||||
buf = slice
|
||||
doHexDump = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// The underlying data needs to be converted if it can't
|
||||
// be type asserted to a uint8 slice.
|
||||
doConvert = true
|
||||
}
|
||||
|
||||
// Copy and convert the underlying type if needed.
|
||||
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||
// Convert and copy each element into a uint8 byte
|
||||
// slice.
|
||||
buf = make([]uint8, numEntries)
|
||||
for i := 0; i < numEntries; i++ {
|
||||
vv := v.Index(i)
|
||||
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||
}
|
||||
doHexDump = true
|
||||
}
|
||||
}
|
||||
|
||||
// Hexdump the entire slice as needed.
|
||||
if doHexDump {
|
||||
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||
str := indent + hex.Dump(buf)
|
||||
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||
str = strings.TrimRight(str, d.cs.Indent)
|
||||
d.w.Write([]byte(str))
|
||||
return
|
||||
}
|
||||
|
||||
// Recursively call dump for each item.
|
||||
for i := 0; i < numEntries; i++ {
|
||||
d.dump(d.unpackValue(v.Index(i)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||
// value to figure out what kind of object we are dealing with and formats it
|
||||
// appropriately. It is a recursive function, however circular data structures
|
||||
// are detected and handled properly.
|
||||
func (d *dumpState) dump(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
d.w.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
d.indent()
|
||||
d.dumpPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !d.ignoreNextType {
|
||||
d.indent()
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write([]byte(v.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.ignoreNextType = false
|
||||
|
||||
// Display length and capacity if the built-in len and cap functions
|
||||
// work with the value's kind and the len/cap itself is non-zero.
|
||||
valueLen, valueCap := 0, 0
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||
valueLen, valueCap = v.Len(), v.Cap()
|
||||
case reflect.Map, reflect.String:
|
||||
valueLen = v.Len()
|
||||
}
|
||||
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
if valueLen != 0 {
|
||||
d.w.Write(lenEqualsBytes)
|
||||
printInt(d.w, int64(valueLen), 10)
|
||||
}
|
||||
if !d.cs.DisableCapacities && valueCap != 0 {
|
||||
if valueLen != 0 {
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.w.Write(capEqualsBytes)
|
||||
printInt(d.w, int64(valueCap), 10)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||
// is enabled
|
||||
if !d.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(d.w, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(d.w, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(d.w, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(d.w, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(d.w, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(d.w, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(d.w, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
d.dumpSlice(v)
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.String:
|
||||
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
keys := v.MapKeys()
|
||||
if d.cs.SortKeys {
|
||||
sortValues(keys, d.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
d.dump(d.unpackValue(key))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
numFields := v.NumField()
|
||||
for i := 0; i < numFields; i++ {
|
||||
d.indent()
|
||||
vtf := vt.Field(i)
|
||||
d.w.Write([]byte(vtf.Name))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.Field(i)))
|
||||
if i < (numFields - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(d.w, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(d.w, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it in case any new
|
||||
// types are added.
|
||||
default:
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(d.w, "%v", v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fdump is a helper function to consolidate the logic from the various public
|
||||
// methods which take varying writers and config states.
|
||||
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||
for _, arg := range a {
|
||||
if arg == nil {
|
||||
w.Write(interfaceBytes)
|
||||
w.Write(spaceBytes)
|
||||
w.Write(nilAngleBytes)
|
||||
w.Write(newlineBytes)
|
||||
continue
|
||||
}
|
||||
|
||||
d := dumpState{w: w, cs: cs}
|
||||
d.pointers = make(map[uintptr]int)
|
||||
d.dump(reflect.ValueOf(arg))
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(&Config, w, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(&Config, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by an exported package global,
|
||||
spew.Config. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func Dump(a ...interface{}) {
|
||||
fdump(&Config, os.Stdout, a...)
|
||||
}
|
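dumpSlice in the removed dump.go above is what produces the hexdump -C style output for byte data; a short sketch, assuming only the exported Fdump function from that file:

package main

import (
    "os"

    "github.com/davecgh/go-spew/spew"
)

func main() {
    data := []byte("hello, frp")
    // Byte slices are rendered as an offset/hex/ASCII table rather than a
    // plain list of integers.
    spew.Fdump(os.Stdout, data)
}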
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
@ -1,419 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||
const supportedFlags = "0-+# "
|
||||
|
||||
// formatState implements the fmt.Formatter interface and contains information
|
||||
// about the state of a formatting operation. The NewFormatter function can
|
||||
// be used to get a new Formatter which can be used directly as arguments
|
||||
// in standard fmt package printing calls.
|
||||
type formatState struct {
|
||||
value interface{}
|
||||
fs fmt.State
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// buildDefaultFormat recreates the original format string without precision
|
||||
// and width information to pass in to fmt.Sprintf in the case of an
|
||||
// unrecognized type. Unless new types are added to the language, this
|
||||
// function won't ever be called.
|
||||
func (f *formatState) buildDefaultFormat() (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteRune('v')
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// constructOrigFormat recreates the original format string including precision
|
||||
// and width information to pass along to the standard fmt package. This allows
|
||||
// automatic deferral of all format strings this package doesn't support.
|
||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
if width, ok := f.fs.Width(); ok {
|
||||
buf.WriteString(strconv.Itoa(width))
|
||||
}
|
||||
|
||||
if precision, ok := f.fs.Precision(); ok {
|
||||
buf.Write(precisionBytes)
|
||||
buf.WriteString(strconv.Itoa(precision))
|
||||
}
|
||||
|
||||
buf.WriteRune(verb)
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||
// ensures that types for values which have been unpacked from an interface
|
||||
// are displayed when the show types flag is also set.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface {
|
||||
f.ignoreNextType = false
|
||||
if !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (f *formatState) formatPtr(v reflect.Value) {
|
||||
// Display nil if top level pointer is nil.
|
||||
showTypes := f.fs.Flag('#')
|
||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range f.pointers {
|
||||
if depth >= f.depth {
|
||||
delete(f.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to possibly show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
f.pointers[addr] = f.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type or indirection level depending on flags.
|
||||
if showTypes && !f.ignoreNextType {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
f.fs.Write([]byte(ve.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
} else {
|
||||
if nilFound || cycleFound {
|
||||
indirects += strings.Count(ve.Type().String(), "*")
|
||||
}
|
||||
f.fs.Write(openAngleBytes)
|
||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||
f.fs.Write(closeAngleBytes)
|
||||
}
|
||||
|
||||
// Display pointer information depending on flags.
|
||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||
f.fs.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
f.fs.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(f.fs, addr)
|
||||
}
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound == true:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
	case cycleFound == true:
		f.fs.Write(circularShortBytes)

	default:
		f.ignoreNextType = true
		f.format(ve)
	}
}

// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
	// Handle invalid reflect values immediately.
	kind := v.Kind()
	if kind == reflect.Invalid {
		f.fs.Write(invalidAngleBytes)
		return
	}

	// Handle pointers specially.
	if kind == reflect.Ptr {
		f.formatPtr(v)
		return
	}

	// Print type information unless already handled elsewhere.
	if !f.ignoreNextType && f.fs.Flag('#') {
		f.fs.Write(openParenBytes)
		f.fs.Write([]byte(v.Type().String()))
		f.fs.Write(closeParenBytes)
	}
	f.ignoreNextType = false

	// Call Stringer/error interfaces if they exist and the handle methods
	// flag is enabled.
	if !f.cs.DisableMethods {
		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
			if handled := handleMethods(f.cs, f.fs, v); handled {
				return
			}
		}
	}

	switch kind {
	case reflect.Invalid:
		// Do nothing. We should never get here since invalid has already
		// been handled above.

	case reflect.Bool:
		printBool(f.fs, v.Bool())

	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
		printInt(f.fs, v.Int(), 10)

	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
		printUint(f.fs, v.Uint(), 10)

	case reflect.Float32:
		printFloat(f.fs, v.Float(), 32)

	case reflect.Float64:
		printFloat(f.fs, v.Float(), 64)

	case reflect.Complex64:
		printComplex(f.fs, v.Complex(), 32)

	case reflect.Complex128:
		printComplex(f.fs, v.Complex(), 64)

	case reflect.Slice:
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}
		fallthrough

	case reflect.Array:
		f.fs.Write(openBracketBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			numEntries := v.Len()
			for i := 0; i < numEntries; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(v.Index(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBracketBytes)

	case reflect.String:
		f.fs.Write([]byte(v.String()))

	case reflect.Interface:
		// The only time we should get here is for nil interfaces due to
		// unpackValue calls.
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
		}

	case reflect.Ptr:
		// Do nothing. We should never get here since pointers have already
		// been handled above.

	case reflect.Map:
		// nil maps should be indicated as different than empty maps
		if v.IsNil() {
			f.fs.Write(nilAngleBytes)
			break
		}

		f.fs.Write(openMapBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			keys := v.MapKeys()
			if f.cs.SortKeys {
				sortValues(keys, f.cs)
			}
			for i, key := range keys {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				f.ignoreNextType = true
				f.format(f.unpackValue(key))
				f.fs.Write(colonBytes)
				f.ignoreNextType = true
				f.format(f.unpackValue(v.MapIndex(key)))
			}
		}
		f.depth--
		f.fs.Write(closeMapBytes)

	case reflect.Struct:
		numFields := v.NumField()
		f.fs.Write(openBraceBytes)
		f.depth++
		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
			f.fs.Write(maxShortBytes)
		} else {
			vt := v.Type()
			for i := 0; i < numFields; i++ {
				if i > 0 {
					f.fs.Write(spaceBytes)
				}
				vtf := vt.Field(i)
				if f.fs.Flag('+') || f.fs.Flag('#') {
					f.fs.Write([]byte(vtf.Name))
					f.fs.Write(colonBytes)
				}
				f.format(f.unpackValue(v.Field(i)))
			}
		}
		f.depth--
		f.fs.Write(closeBraceBytes)

	case reflect.Uintptr:
		printHexPtr(f.fs, uintptr(v.Uint()))

	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
		printHexPtr(f.fs, v.Pointer())

	// There were not any other types at the time this code was written, but
	// fall back to letting the default fmt package handle it if any get added.
	default:
		format := f.buildDefaultFormat()
		if v.CanInterface() {
			fmt.Fprintf(f.fs, format, v.Interface())
		} else {
			fmt.Fprintf(f.fs, format, v.String())
		}
	}
}

// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
	f.fs = fs

	// Use standard formatting for verbs that are not v.
	if verb != 'v' {
		format := f.constructOrigFormat(verb)
		fmt.Fprintf(fs, format, f.value)
		return
	}

	if f.value == nil {
		if fs.Flag('#') {
			fs.Write(interfaceBytes)
		}
		fs.Write(nilAngleBytes)
		return
	}

	f.format(reflect.ValueOf(f.value))
}

// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
	fs := &formatState{value: v, cs: cs}
	fs.pointers = make(map[uintptr]int)
	return fs
}

/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.

The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).

Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
	return newFormatter(&Config, v)
}
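For readers skimming this removed vendor code, here is a minimal sketch of how the formatter described above is meant to be used. It is not part of the diff, and the endpoint struct is a made-up example:

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// endpoint is a hypothetical struct used only to illustrate the verbs.
type endpoint struct {
	Name string
	Port int
}

func main() {
	e := &endpoint{Name: "web", Port: 8080}

	// %v is the most compact form; %+v adds pointer addresses,
	// %#v adds types, and %#+v adds both.
	fmt.Printf("%v\n", spew.NewFormatter(e))
	fmt.Printf("%#+v\n", spew.NewFormatter(e))

	// The convenience wrappers in spew.go below (spew.Printf and friends)
	// wrap every argument in NewFormatter automatically.
	spew.Printf("endpoint: %+v\n", e)
}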
148 vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored
@@ -1,148 +0,0 @@
/*
 * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

package spew

import (
	"fmt"
	"io"
)

// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
	return fmt.Errorf(format, convertArgs(a)...)
}

// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprint(w, convertArgs(a)...)
}

// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
	return fmt.Fprintf(w, format, convertArgs(a)...)
}

// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
	return fmt.Fprintln(w, convertArgs(a)...)
}

// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
	return fmt.Print(convertArgs(a)...)
}

// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
	return fmt.Printf(format, convertArgs(a)...)
}

// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
	return fmt.Println(convertArgs(a)...)
}

// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
	return fmt.Sprint(convertArgs(a)...)
}

// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
	return fmt.Sprintf(format, convertArgs(a)...)
}

// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
	return fmt.Sprintln(convertArgs(a)...)
}

// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
	formatters = make([]interface{}, len(args))
	for index, arg := range args {
		formatters[index] = NewFormatter(arg)
	}
	return formatters
}
13 vendor/github.com/fatedier/beego/LICENSE generated vendored
@@ -1,13 +0,0 @@
Copyright 2014 astaxie

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
63 vendor/github.com/fatedier/beego/logs/README.md generated vendored
@@ -1,63 +0,0 @@
## logs
logs is a Go logs manager. It can use many logs adapters. The repo is inspired by `database/sql` .


## How to install?

	go get github.com/astaxie/beego/logs


## What adapters are supported?

As of now this logs support console, file,smtp and conn.


## How to use it?

First you must import it

	import (
		"github.com/astaxie/beego/logs"
	)

Then init a Log (example with console adapter)

	log := NewLogger(10000)
	log.SetLogger("console", "")

> the first params stand for how many channel

Use it like this:

	log.Trace("trace")
	log.Info("info")
	log.Warn("warning")
	log.Debug("debug")
	log.Critical("critical")


## File adapter

Configure file adapter like this:

	log := NewLogger(10000)
	log.SetLogger("file", `{"filename":"test.log"}`)


## Conn adapter

Configure like this:

	log := NewLogger(1000)
	log.SetLogger("conn", `{"net":"tcp","addr":":7020"}`)
	log.Info("info")


## Smtp adapter

Configure like this:

	log := NewLogger(10000)
	log.SetLogger("smtp", `{"username":"beegotest@gmail.com","password":"xxxxxxxx","host":"smtp.gmail.com:587","sendTos":["xiemengjun@gmail.com"]}`)
	log.Critical("sendmail critical")
	time.Sleep(time.Second * 30)
28 vendor/github.com/fatedier/beego/logs/color.go generated vendored
@@ -1,28 +0,0 @@
// Copyright 2014 beego Author. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !windows

package logs

import "io"

type ansiColorWriter struct {
	w    io.Writer
	mode outputMode
}

func (cw *ansiColorWriter) Write(p []byte) (int, error) {
	return cw.w.Write(p)
}
428 vendor/github.com/fatedier/beego/logs/color_windows.go generated vendored
@@ -1,428 +0,0 @@
|
||||
// Copyright 2014 beego Author. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// +build windows
|
||||
|
||||
package logs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type (
|
||||
csiState int
|
||||
parseResult int
|
||||
)
|
||||
|
||||
const (
|
||||
outsideCsiCode csiState = iota
|
||||
firstCsiCode
|
||||
secondCsiCode
|
||||
)
|
||||
|
||||
const (
|
||||
noConsole parseResult = iota
|
||||
changedColor
|
||||
unknown
|
||||
)
|
||||
|
||||
type ansiColorWriter struct {
|
||||
w io.Writer
|
||||
mode outputMode
|
||||
state csiState
|
||||
paramStartBuf bytes.Buffer
|
||||
paramBuf bytes.Buffer
|
||||
}
|
||||
|
||||
const (
|
||||
firstCsiChar byte = '\x1b'
|
||||
secondeCsiChar byte = '['
|
||||
separatorChar byte = ';'
|
||||
sgrCode byte = 'm'
|
||||
)
|
||||
|
||||
const (
|
||||
foregroundBlue = uint16(0x0001)
|
||||
foregroundGreen = uint16(0x0002)
|
||||
foregroundRed = uint16(0x0004)
|
||||
foregroundIntensity = uint16(0x0008)
|
||||
backgroundBlue = uint16(0x0010)
|
||||
backgroundGreen = uint16(0x0020)
|
||||
backgroundRed = uint16(0x0040)
|
||||
backgroundIntensity = uint16(0x0080)
|
||||
underscore = uint16(0x8000)
|
||||
|
||||
foregroundMask = foregroundBlue | foregroundGreen | foregroundRed | foregroundIntensity
|
||||
backgroundMask = backgroundBlue | backgroundGreen | backgroundRed | backgroundIntensity
|
||||
)
|
||||
|
||||
const (
|
||||
ansiReset = "0"
|
||||
ansiIntensityOn = "1"
|
||||
ansiIntensityOff = "21"
|
||||
ansiUnderlineOn = "4"
|
||||
ansiUnderlineOff = "24"
|
||||
ansiBlinkOn = "5"
|
||||
ansiBlinkOff = "25"
|
||||
|
||||
ansiForegroundBlack = "30"
|
||||
ansiForegroundRed = "31"
|
||||
ansiForegroundGreen = "32"
|
||||
ansiForegroundYellow = "33"
|
||||
ansiForegroundBlue = "34"
|
||||
ansiForegroundMagenta = "35"
|
||||
ansiForegroundCyan = "36"
|
||||
ansiForegroundWhite = "37"
|
||||
ansiForegroundDefault = "39"
|
||||
|
||||
ansiBackgroundBlack = "40"
|
||||
ansiBackgroundRed = "41"
|
||||
ansiBackgroundGreen = "42"
|
||||
ansiBackgroundYellow = "43"
|
||||
ansiBackgroundBlue = "44"
|
||||
ansiBackgroundMagenta = "45"
|
||||
ansiBackgroundCyan = "46"
|
||||
ansiBackgroundWhite = "47"
|
||||
ansiBackgroundDefault = "49"
|
||||
|
||||
ansiLightForegroundGray = "90"
|
||||
ansiLightForegroundRed = "91"
|
||||
ansiLightForegroundGreen = "92"
|
||||
ansiLightForegroundYellow = "93"
|
||||
ansiLightForegroundBlue = "94"
|
||||
ansiLightForegroundMagenta = "95"
|
||||
ansiLightForegroundCyan = "96"
|
||||
ansiLightForegroundWhite = "97"
|
||||
|
||||
ansiLightBackgroundGray = "100"
|
||||
ansiLightBackgroundRed = "101"
|
||||
ansiLightBackgroundGreen = "102"
|
||||
ansiLightBackgroundYellow = "103"
|
||||
ansiLightBackgroundBlue = "104"
|
||||
ansiLightBackgroundMagenta = "105"
|
||||
ansiLightBackgroundCyan = "106"
|
||||
ansiLightBackgroundWhite = "107"
|
||||
)
|
||||
|
||||
type drawType int
|
||||
|
||||
const (
|
||||
foreground drawType = iota
|
||||
background
|
||||
)
|
||||
|
||||
type winColor struct {
|
||||
code uint16
|
||||
drawType drawType
|
||||
}
|
||||
|
||||
var colorMap = map[string]winColor{
|
||||
ansiForegroundBlack: {0, foreground},
|
||||
ansiForegroundRed: {foregroundRed, foreground},
|
||||
ansiForegroundGreen: {foregroundGreen, foreground},
|
||||
ansiForegroundYellow: {foregroundRed | foregroundGreen, foreground},
|
||||
ansiForegroundBlue: {foregroundBlue, foreground},
|
||||
ansiForegroundMagenta: {foregroundRed | foregroundBlue, foreground},
|
||||
ansiForegroundCyan: {foregroundGreen | foregroundBlue, foreground},
|
||||
ansiForegroundWhite: {foregroundRed | foregroundGreen | foregroundBlue, foreground},
|
||||
ansiForegroundDefault: {foregroundRed | foregroundGreen | foregroundBlue, foreground},
|
||||
|
||||
ansiBackgroundBlack: {0, background},
|
||||
ansiBackgroundRed: {backgroundRed, background},
|
||||
ansiBackgroundGreen: {backgroundGreen, background},
|
||||
ansiBackgroundYellow: {backgroundRed | backgroundGreen, background},
|
||||
ansiBackgroundBlue: {backgroundBlue, background},
|
||||
ansiBackgroundMagenta: {backgroundRed | backgroundBlue, background},
|
||||
ansiBackgroundCyan: {backgroundGreen | backgroundBlue, background},
|
||||
ansiBackgroundWhite: {backgroundRed | backgroundGreen | backgroundBlue, background},
|
||||
ansiBackgroundDefault: {0, background},
|
||||
|
||||
ansiLightForegroundGray: {foregroundIntensity, foreground},
|
||||
ansiLightForegroundRed: {foregroundIntensity | foregroundRed, foreground},
|
||||
ansiLightForegroundGreen: {foregroundIntensity | foregroundGreen, foreground},
|
||||
ansiLightForegroundYellow: {foregroundIntensity | foregroundRed | foregroundGreen, foreground},
|
||||
ansiLightForegroundBlue: {foregroundIntensity | foregroundBlue, foreground},
|
||||
ansiLightForegroundMagenta: {foregroundIntensity | foregroundRed | foregroundBlue, foreground},
|
||||
ansiLightForegroundCyan: {foregroundIntensity | foregroundGreen | foregroundBlue, foreground},
|
||||
ansiLightForegroundWhite: {foregroundIntensity | foregroundRed | foregroundGreen | foregroundBlue, foreground},
|
||||
|
||||
ansiLightBackgroundGray: {backgroundIntensity, background},
|
||||
ansiLightBackgroundRed: {backgroundIntensity | backgroundRed, background},
|
||||
ansiLightBackgroundGreen: {backgroundIntensity | backgroundGreen, background},
|
||||
ansiLightBackgroundYellow: {backgroundIntensity | backgroundRed | backgroundGreen, background},
|
||||
ansiLightBackgroundBlue: {backgroundIntensity | backgroundBlue, background},
|
||||
ansiLightBackgroundMagenta: {backgroundIntensity | backgroundRed | backgroundBlue, background},
|
||||
ansiLightBackgroundCyan: {backgroundIntensity | backgroundGreen | backgroundBlue, background},
|
||||
ansiLightBackgroundWhite: {backgroundIntensity | backgroundRed | backgroundGreen | backgroundBlue, background},
|
||||
}
|
||||
|
||||
var (
|
||||
kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
||||
procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
|
||||
procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
|
||||
defaultAttr *textAttributes
|
||||
)
|
||||
|
||||
func init() {
|
||||
screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
|
||||
if screenInfo != nil {
|
||||
colorMap[ansiForegroundDefault] = winColor{
|
||||
screenInfo.WAttributes & (foregroundRed | foregroundGreen | foregroundBlue),
|
||||
foreground,
|
||||
}
|
||||
colorMap[ansiBackgroundDefault] = winColor{
|
||||
screenInfo.WAttributes & (backgroundRed | backgroundGreen | backgroundBlue),
|
||||
background,
|
||||
}
|
||||
defaultAttr = convertTextAttr(screenInfo.WAttributes)
|
||||
}
|
||||
}
|
||||
|
||||
type coord struct {
|
||||
X, Y int16
|
||||
}
|
||||
|
||||
type smallRect struct {
|
||||
Left, Top, Right, Bottom int16
|
||||
}
|
||||
|
||||
type consoleScreenBufferInfo struct {
|
||||
DwSize coord
|
||||
DwCursorPosition coord
|
||||
WAttributes uint16
|
||||
SrWindow smallRect
|
||||
DwMaximumWindowSize coord
|
||||
}
|
||||
|
||||
func getConsoleScreenBufferInfo(hConsoleOutput uintptr) *consoleScreenBufferInfo {
|
||||
var csbi consoleScreenBufferInfo
|
||||
ret, _, _ := procGetConsoleScreenBufferInfo.Call(
|
||||
hConsoleOutput,
|
||||
uintptr(unsafe.Pointer(&csbi)))
|
||||
if ret == 0 {
|
||||
return nil
|
||||
}
|
||||
return &csbi
|
||||
}
|
||||
|
||||
func setConsoleTextAttribute(hConsoleOutput uintptr, wAttributes uint16) bool {
|
||||
ret, _, _ := procSetConsoleTextAttribute.Call(
|
||||
hConsoleOutput,
|
||||
uintptr(wAttributes))
|
||||
return ret != 0
|
||||
}
|
||||
|
||||
type textAttributes struct {
|
||||
foregroundColor uint16
|
||||
backgroundColor uint16
|
||||
foregroundIntensity uint16
|
||||
backgroundIntensity uint16
|
||||
underscore uint16
|
||||
otherAttributes uint16
|
||||
}
|
||||
|
||||
func convertTextAttr(winAttr uint16) *textAttributes {
|
||||
fgColor := winAttr & (foregroundRed | foregroundGreen | foregroundBlue)
|
||||
bgColor := winAttr & (backgroundRed | backgroundGreen | backgroundBlue)
|
||||
fgIntensity := winAttr & foregroundIntensity
|
||||
bgIntensity := winAttr & backgroundIntensity
|
||||
underline := winAttr & underscore
|
||||
otherAttributes := winAttr &^ (foregroundMask | backgroundMask | underscore)
|
||||
return &textAttributes{fgColor, bgColor, fgIntensity, bgIntensity, underline, otherAttributes}
|
||||
}
|
||||
|
||||
func convertWinAttr(textAttr *textAttributes) uint16 {
|
||||
var winAttr uint16
|
||||
winAttr |= textAttr.foregroundColor
|
||||
winAttr |= textAttr.backgroundColor
|
||||
winAttr |= textAttr.foregroundIntensity
|
||||
winAttr |= textAttr.backgroundIntensity
|
||||
winAttr |= textAttr.underscore
|
||||
winAttr |= textAttr.otherAttributes
|
||||
return winAttr
|
||||
}
|
||||
|
||||
func changeColor(param []byte) parseResult {
|
||||
screenInfo := getConsoleScreenBufferInfo(uintptr(syscall.Stdout))
|
||||
if screenInfo == nil {
|
||||
return noConsole
|
||||
}
|
||||
|
||||
winAttr := convertTextAttr(screenInfo.WAttributes)
|
||||
strParam := string(param)
|
||||
if len(strParam) <= 0 {
|
||||
strParam = "0"
|
||||
}
|
||||
csiParam := strings.Split(strParam, string(separatorChar))
|
||||
for _, p := range csiParam {
|
||||
c, ok := colorMap[p]
|
||||
switch {
|
||||
case !ok:
|
||||
switch p {
|
||||
case ansiReset:
|
||||
winAttr.foregroundColor = defaultAttr.foregroundColor
|
||||
winAttr.backgroundColor = defaultAttr.backgroundColor
|
||||
winAttr.foregroundIntensity = defaultAttr.foregroundIntensity
|
||||
winAttr.backgroundIntensity = defaultAttr.backgroundIntensity
|
||||
winAttr.underscore = 0
|
||||
winAttr.otherAttributes = 0
|
||||
case ansiIntensityOn:
|
||||
winAttr.foregroundIntensity = foregroundIntensity
|
||||
case ansiIntensityOff:
|
||||
winAttr.foregroundIntensity = 0
|
||||
case ansiUnderlineOn:
|
||||
winAttr.underscore = underscore
|
||||
case ansiUnderlineOff:
|
||||
winAttr.underscore = 0
|
||||
case ansiBlinkOn:
|
||||
winAttr.backgroundIntensity = backgroundIntensity
|
||||
case ansiBlinkOff:
|
||||
winAttr.backgroundIntensity = 0
|
||||
default:
|
||||
// unknown code
|
||||
}
|
||||
case c.drawType == foreground:
|
||||
winAttr.foregroundColor = c.code
|
||||
case c.drawType == background:
|
||||
winAttr.backgroundColor = c.code
|
||||
}
|
||||
}
|
||||
winTextAttribute := convertWinAttr(winAttr)
|
||||
setConsoleTextAttribute(uintptr(syscall.Stdout), winTextAttribute)
|
||||
|
||||
return changedColor
|
||||
}
|
||||
|
||||
func parseEscapeSequence(command byte, param []byte) parseResult {
|
||||
if defaultAttr == nil {
|
||||
return noConsole
|
||||
}
|
||||
|
||||
switch command {
|
||||
case sgrCode:
|
||||
return changeColor(param)
|
||||
default:
|
||||
return unknown
|
||||
}
|
||||
}
|
||||
|
||||
func (cw *ansiColorWriter) flushBuffer() (int, error) {
|
||||
return cw.flushTo(cw.w)
|
||||
}
|
||||
|
||||
func (cw *ansiColorWriter) resetBuffer() (int, error) {
|
||||
return cw.flushTo(nil)
|
||||
}
|
||||
|
||||
func (cw *ansiColorWriter) flushTo(w io.Writer) (int, error) {
|
||||
var n1, n2 int
|
||||
var err error
|
||||
|
||||
startBytes := cw.paramStartBuf.Bytes()
|
||||
cw.paramStartBuf.Reset()
|
||||
if w != nil {
|
||||
n1, err = cw.w.Write(startBytes)
|
||||
if err != nil {
|
||||
return n1, err
|
||||
}
|
||||
} else {
|
||||
n1 = len(startBytes)
|
||||
}
|
||||
paramBytes := cw.paramBuf.Bytes()
|
||||
cw.paramBuf.Reset()
|
||||
if w != nil {
|
||||
n2, err = cw.w.Write(paramBytes)
|
||||
if err != nil {
|
||||
return n1 + n2, err
|
||||
}
|
||||
} else {
|
||||
n2 = len(paramBytes)
|
||||
}
|
||||
return n1 + n2, nil
|
||||
}
|
||||
|
||||
func isParameterChar(b byte) bool {
|
||||
return ('0' <= b && b <= '9') || b == separatorChar
|
||||
}
|
||||
|
||||
func (cw *ansiColorWriter) Write(p []byte) (int, error) {
|
||||
r, nw, first, last := 0, 0, 0, 0
|
||||
if cw.mode != DiscardNonColorEscSeq {
|
||||
cw.state = outsideCsiCode
|
||||
cw.resetBuffer()
|
||||
}
|
||||
|
||||
var err error
|
||||
for i, ch := range p {
|
||||
switch cw.state {
|
||||
case outsideCsiCode:
|
||||
if ch == firstCsiChar {
|
||||
cw.paramStartBuf.WriteByte(ch)
|
||||
cw.state = firstCsiCode
|
||||
}
|
||||
case firstCsiCode:
|
||||
switch ch {
|
||||
case firstCsiChar:
|
||||
cw.paramStartBuf.WriteByte(ch)
|
||||
break
|
||||
case secondeCsiChar:
|
||||
cw.paramStartBuf.WriteByte(ch)
|
||||
cw.state = secondCsiCode
|
||||
last = i - 1
|
||||
default:
|
||||
cw.resetBuffer()
|
||||
cw.state = outsideCsiCode
|
||||
}
|
||||
case secondCsiCode:
|
||||
if isParameterChar(ch) {
|
||||
cw.paramBuf.WriteByte(ch)
|
||||
} else {
|
||||
nw, err = cw.w.Write(p[first:last])
|
||||
r += nw
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
first = i + 1
|
||||
result := parseEscapeSequence(ch, cw.paramBuf.Bytes())
|
||||
if result == noConsole || (cw.mode == OutputNonColorEscSeq && result == unknown) {
|
||||
cw.paramBuf.WriteByte(ch)
|
||||
nw, err := cw.flushBuffer()
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
r += nw
|
||||
} else {
|
||||
n, _ := cw.resetBuffer()
|
||||
// Add one more to the size of the buffer for the last ch
|
||||
r += n + 1
|
||||
}
|
||||
|
||||
cw.state = outsideCsiCode
|
||||
}
|
||||
default:
|
||||
cw.state = outsideCsiCode
|
||||
}
|
||||
}
|
||||
|
||||
if cw.mode != DiscardNonColorEscSeq || cw.state == outsideCsiCode {
|
||||
nw, err = cw.w.Write(p[first:])
|
||||
r += nw
|
||||
}
|
||||
|
||||
return r, err
|
||||
}
|
117 vendor/github.com/fatedier/beego/logs/conn.go generated vendored
@@ -1,117 +0,0 @@
|
||||
// Copyright 2014 beego Author. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
// connWriter implements LoggerInterface.
|
||||
// it writes messages in keep-live tcp connection.
|
||||
type connWriter struct {
|
||||
lg *logWriter
|
||||
innerWriter io.WriteCloser
|
||||
ReconnectOnMsg bool `json:"reconnectOnMsg"`
|
||||
Reconnect bool `json:"reconnect"`
|
||||
Net string `json:"net"`
|
||||
Addr string `json:"addr"`
|
||||
Level int `json:"level"`
|
||||
}
|
||||
|
||||
// NewConn create new ConnWrite returning as LoggerInterface.
|
||||
func NewConn() Logger {
|
||||
conn := new(connWriter)
|
||||
conn.Level = LevelTrace
|
||||
return conn
|
||||
}
|
||||
|
||||
// Init init connection writer with json config.
|
||||
// json config only need key "level".
|
||||
func (c *connWriter) Init(jsonConfig string) error {
|
||||
return json.Unmarshal([]byte(jsonConfig), c)
|
||||
}
|
||||
|
||||
// WriteMsg write message in connection.
|
||||
// if connection is down, try to re-connect.
|
||||
func (c *connWriter) WriteMsg(when time.Time, msg string, level int) error {
|
||||
if level > c.Level {
|
||||
return nil
|
||||
}
|
||||
if c.needToConnectOnMsg() {
|
||||
err := c.connect()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.ReconnectOnMsg {
|
||||
defer c.innerWriter.Close()
|
||||
}
|
||||
|
||||
c.lg.println(when, msg)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush implementing method. empty.
|
||||
func (c *connWriter) Flush() {
|
||||
|
||||
}
|
||||
|
||||
// Destroy destroy connection writer and close tcp listener.
|
||||
func (c *connWriter) Destroy() {
|
||||
if c.innerWriter != nil {
|
||||
c.innerWriter.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (c *connWriter) connect() error {
|
||||
if c.innerWriter != nil {
|
||||
c.innerWriter.Close()
|
||||
c.innerWriter = nil
|
||||
}
|
||||
|
||||
conn, err := net.Dial(c.Net, c.Addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if tcpConn, ok := conn.(*net.TCPConn); ok {
|
||||
tcpConn.SetKeepAlive(true)
|
||||
}
|
||||
|
||||
c.innerWriter = conn
|
||||
c.lg = newLogWriter(conn)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *connWriter) needToConnectOnMsg() bool {
|
||||
if c.Reconnect {
|
||||
c.Reconnect = false
|
||||
return true
|
||||
}
|
||||
|
||||
if c.innerWriter == nil {
|
||||
return true
|
||||
}
|
||||
|
||||
return c.ReconnectOnMsg
|
||||
}
|
||||
|
||||
func init() {
|
||||
Register(AdapterConn, NewConn)
|
||||
}
|
102 vendor/github.com/fatedier/beego/logs/console.go generated vendored
@@ -1,102 +0,0 @@
|
||||
// Copyright 2014 beego Author. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// brush is a color join function
|
||||
type brush func(string) string
|
||||
|
||||
// newBrush return a fix color Brush
|
||||
func newBrush(color string) brush {
|
||||
pre := "\033["
|
||||
reset := "\033[0m"
|
||||
return func(text string) string {
|
||||
return pre + color + "m" + text + reset
|
||||
}
|
||||
}
|
||||
|
||||
var colors = []brush{
|
||||
newBrush("1;37"), // Emergency white
|
||||
newBrush("1;36"), // Alert cyan
|
||||
newBrush("1;35"), // Critical magenta
|
||||
newBrush("1;31"), // Error red
|
||||
newBrush("1;33"), // Warning yellow
|
||||
newBrush("1;32"), // Notice green
|
||||
newBrush("1;34"), // Informational blue
|
||||
newBrush("1;34"), // Debug blue
|
||||
newBrush("1;34"), // Trace blue
|
||||
}
|
||||
|
||||
// consoleWriter implements LoggerInterface and writes messages to terminal.
|
||||
type consoleWriter struct {
|
||||
lg *logWriter
|
||||
Level int `json:"level"`
|
||||
Colorful bool `json:"color"` //this filed is useful only when system's terminal supports color
|
||||
}
|
||||
|
||||
// NewConsole create ConsoleWriter returning as LoggerInterface.
|
||||
func NewConsole() Logger {
|
||||
cw := &consoleWriter{
|
||||
lg: newLogWriter(os.Stdout),
|
||||
Level: LevelTrace,
|
||||
Colorful: runtime.GOOS != "windows",
|
||||
}
|
||||
return cw
|
||||
}
|
||||
|
||||
// Init init console logger.
|
||||
// jsonConfig like '{"level":LevelTrace}'.
|
||||
func (c *consoleWriter) Init(jsonConfig string) error {
|
||||
if len(jsonConfig) == 0 {
|
||||
return nil
|
||||
}
|
||||
err := json.Unmarshal([]byte(jsonConfig), c)
|
||||
if runtime.GOOS == "windows" {
|
||||
c.Colorful = false
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteMsg write message in console.
|
||||
func (c *consoleWriter) WriteMsg(when time.Time, msg string, level int) error {
|
||||
if level > c.Level {
|
||||
return nil
|
||||
}
|
||||
if c.Colorful {
|
||||
msg = colors[level](msg)
|
||||
}
|
||||
c.lg.println(when, msg)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Destroy implementing method. empty.
|
||||
func (c *consoleWriter) Destroy() {
|
||||
|
||||
}
|
||||
|
||||
// Flush implementing method. empty.
|
||||
func (c *consoleWriter) Flush() {
|
||||
|
||||
}
|
||||
|
||||
func init() {
|
||||
Register(AdapterConsole, NewConsole)
|
||||
}
|
327 vendor/github.com/fatedier/beego/logs/file.go generated vendored
@@ -1,327 +0,0 @@
|
||||
// Copyright 2014 beego Author. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// fileLogWriter implements LoggerInterface.
|
||||
// It writes messages by lines limit, file size limit, or time frequency.
|
||||
type fileLogWriter struct {
|
||||
sync.RWMutex // write log order by order and atomic incr maxLinesCurLines and maxSizeCurSize
|
||||
// The opened file
|
||||
Filename string `json:"filename"`
|
||||
fileWriter *os.File
|
||||
|
||||
// Rotate at line
|
||||
MaxLines int `json:"maxlines"`
|
||||
maxLinesCurLines int
|
||||
|
||||
// Rotate at size
|
||||
MaxSize int `json:"maxsize"`
|
||||
maxSizeCurSize int
|
||||
|
||||
// Rotate daily
|
||||
Daily bool `json:"daily"`
|
||||
MaxDays int64 `json:"maxdays"`
|
||||
dailyOpenDate int
|
||||
dailyOpenTime time.Time
|
||||
|
||||
Rotate bool `json:"rotate"`
|
||||
|
||||
Level int `json:"level"`
|
||||
|
||||
Perm string `json:"perm"`
|
||||
|
||||
fileNameOnly, suffix string // like "project.log", project is fileNameOnly and .log is suffix
|
||||
}
|
||||
|
||||
// newFileWriter create a FileLogWriter returning as LoggerInterface.
|
||||
func newFileWriter() Logger {
|
||||
w := &fileLogWriter{
|
||||
Daily: true,
|
||||
MaxDays: 7,
|
||||
Rotate: true,
|
||||
Level: LevelTrace,
|
||||
Perm: "0660",
|
||||
}
|
||||
return w
|
||||
}
|
||||
|
||||
// Init file logger with json config.
|
||||
// jsonConfig like:
|
||||
// {
|
||||
// "filename":"logs/beego.log",
|
||||
// "maxLines":10000,
|
||||
// "maxsize":1024,
|
||||
// "daily":true,
|
||||
// "maxDays":15,
|
||||
// "rotate":true,
|
||||
// "perm":"0600"
|
||||
// }
|
||||
func (w *fileLogWriter) Init(jsonConfig string) error {
|
||||
err := json.Unmarshal([]byte(jsonConfig), w)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(w.Filename) == 0 {
|
||||
return errors.New("jsonconfig must have filename")
|
||||
}
|
||||
w.suffix = filepath.Ext(w.Filename)
|
||||
w.fileNameOnly = strings.TrimSuffix(w.Filename, w.suffix)
|
||||
if w.suffix == "" {
|
||||
w.suffix = ".log"
|
||||
}
|
||||
err = w.startLogger()
|
||||
return err
|
||||
}
|
||||
|
||||
// start file logger. create log file and set to locker-inside file writer.
|
||||
func (w *fileLogWriter) startLogger() error {
|
||||
file, err := w.createLogFile()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if w.fileWriter != nil {
|
||||
w.fileWriter.Close()
|
||||
}
|
||||
w.fileWriter = file
|
||||
return w.initFd()
|
||||
}
|
||||
|
||||
func (w *fileLogWriter) needRotate(size int, day int) bool {
|
||||
return (w.MaxLines > 0 && w.maxLinesCurLines >= w.MaxLines) ||
|
||||
(w.MaxSize > 0 && w.maxSizeCurSize >= w.MaxSize) ||
|
||||
(w.Daily && day != w.dailyOpenDate)
|
||||
|
||||
}
|
||||
|
||||
// WriteMsg write logger message into file.
|
||||
func (w *fileLogWriter) WriteMsg(when time.Time, msg string, level int) error {
|
||||
if level > w.Level {
|
||||
return nil
|
||||
}
|
||||
h, d := formatTimeHeader(when)
|
||||
msg = string(h) + msg + "\n"
|
||||
if w.Rotate {
|
||||
w.RLock()
|
||||
if w.needRotate(len(msg), d) {
|
||||
w.RUnlock()
|
||||
w.Lock()
|
||||
if w.needRotate(len(msg), d) {
|
||||
if err := w.doRotate(when); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
|
||||
}
|
||||
}
|
||||
w.Unlock()
|
||||
} else {
|
||||
w.RUnlock()
|
||||
}
|
||||
}
|
||||
|
||||
w.Lock()
|
||||
_, err := w.fileWriter.Write([]byte(msg))
|
||||
if err == nil {
|
||||
w.maxLinesCurLines++
|
||||
w.maxSizeCurSize += len(msg)
|
||||
}
|
||||
w.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *fileLogWriter) createLogFile() (*os.File, error) {
|
||||
// Open the log file
|
||||
perm, err := strconv.ParseInt(w.Perm, 8, 64)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fd, err := os.OpenFile(w.Filename, os.O_WRONLY|os.O_APPEND|os.O_CREATE, os.FileMode(perm))
|
||||
if err == nil {
|
||||
// Make sure file perm is user set perm cause of `os.OpenFile` will obey umask
|
||||
os.Chmod(w.Filename, os.FileMode(perm))
|
||||
}
|
||||
return fd, err
|
||||
}
|
||||
|
||||
func (w *fileLogWriter) initFd() error {
|
||||
fd := w.fileWriter
|
||||
fInfo, err := fd.Stat()
|
||||
if err != nil {
|
||||
return fmt.Errorf("get stat err: %s\n", err)
|
||||
}
|
||||
w.maxSizeCurSize = int(fInfo.Size())
|
||||
w.dailyOpenTime = time.Now()
|
||||
w.dailyOpenDate = w.dailyOpenTime.Day()
|
||||
w.maxLinesCurLines = 0
|
||||
if w.Daily {
|
||||
go w.dailyRotate(w.dailyOpenTime)
|
||||
}
|
||||
if fInfo.Size() > 0 {
|
||||
count, err := w.lines()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
w.maxLinesCurLines = count
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *fileLogWriter) dailyRotate(openTime time.Time) {
|
||||
y, m, d := openTime.Add(24 * time.Hour).Date()
|
||||
nextDay := time.Date(y, m, d, 0, 0, 0, 0, openTime.Location())
|
||||
tm := time.NewTimer(time.Duration(nextDay.UnixNano() - openTime.UnixNano() + 100))
|
||||
select {
|
||||
case <-tm.C:
|
||||
w.Lock()
|
||||
if w.needRotate(0, time.Now().Day()) {
|
||||
if err := w.doRotate(time.Now()); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "FileLogWriter(%q): %s\n", w.Filename, err)
|
||||
}
|
||||
}
|
||||
w.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (w *fileLogWriter) lines() (int, error) {
|
||||
fd, err := os.Open(w.Filename)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
buf := make([]byte, 32768) // 32k
|
||||
count := 0
|
||||
lineSep := []byte{'\n'}
|
||||
|
||||
for {
|
||||
c, err := fd.Read(buf)
|
||||
if err != nil && err != io.EOF {
|
||||
return count, err
|
||||
}
|
||||
|
||||
count += bytes.Count(buf[:c], lineSep)
|
||||
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return count, nil
|
||||
}
|
||||
|
||||
// DoRotate means it need to write file in new file.
|
||||
// new file name like xx.2013-01-01.log (daily) or xx.001.log (by line or size)
|
||||
func (w *fileLogWriter) doRotate(logTime time.Time) error {
|
||||
// file exists
|
||||
// Find the next available number
|
||||
num := 1
|
||||
fName := ""
|
||||
|
||||
_, err := os.Lstat(w.Filename)
|
||||
if err != nil {
|
||||
//even if the file is not exist or other ,we should RESTART the logger
|
||||
goto RESTART_LOGGER
|
||||
}
|
||||
|
||||
if w.MaxLines > 0 || w.MaxSize > 0 {
|
||||
for ; err == nil && num <= 999; num++ {
|
||||
fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", logTime.Format("2006-01-02"), num, w.suffix)
|
||||
_, err = os.Lstat(fName)
|
||||
}
|
||||
} else {
|
||||
fName = fmt.Sprintf("%s.%s%s", w.fileNameOnly, w.dailyOpenTime.Format("2006-01-02"), w.suffix)
|
||||
_, err = os.Lstat(fName)
|
||||
for ; err == nil && num <= 999; num++ {
|
||||
fName = w.fileNameOnly + fmt.Sprintf(".%s.%03d%s", w.dailyOpenTime.Format("2006-01-02"), num, w.suffix)
|
||||
_, err = os.Lstat(fName)
|
||||
}
|
||||
}
|
||||
// return error if the last file checked still existed
|
||||
if err == nil {
|
||||
return fmt.Errorf("Rotate: Cannot find free log number to rename %s\n", w.Filename)
|
||||
}
|
||||
|
||||
// close fileWriter before rename
|
||||
w.fileWriter.Close()
|
||||
|
||||
// Rename the file to its new found name
|
||||
// even if occurs error,we MUST guarantee to restart new logger
|
||||
err = os.Rename(w.Filename, fName)
|
||||
err = os.Chmod(fName, os.FileMode(0440))
|
||||
// re-start logger
|
||||
RESTART_LOGGER:
|
||||
|
||||
startLoggerErr := w.startLogger()
|
||||
go w.deleteOldLog()
|
||||
|
||||
if startLoggerErr != nil {
|
||||
return fmt.Errorf("Rotate StartLogger: %s\n", startLoggerErr)
|
||||
}
|
||||
if err != nil {
|
||||
return fmt.Errorf("Rotate: %s\n", err)
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (w *fileLogWriter) deleteOldLog() {
|
||||
dir := filepath.Dir(w.Filename)
|
||||
filepath.Walk(dir, func(path string, info os.FileInfo, err error) (returnErr error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
fmt.Fprintf(os.Stderr, "Unable to delete old log '%s', error: %v\n", path, r)
|
||||
}
|
||||
}()
|
||||
|
||||
if info == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !info.IsDir() && info.ModTime().Add(24*time.Hour*time.Duration(w.MaxDays)).Before(time.Now()) {
|
||||
if strings.HasPrefix(filepath.Base(path), filepath.Base(w.fileNameOnly)) &&
|
||||
strings.HasSuffix(filepath.Base(path), w.suffix) {
|
||||
os.Remove(path)
|
||||
}
|
||||
}
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
// Destroy close the file description, close file writer.
|
||||
func (w *fileLogWriter) Destroy() {
|
||||
w.fileWriter.Close()
|
||||
}
|
||||
|
||||
// Flush flush file logger.
|
||||
// there are no buffering messages in file logger in memory.
|
||||
// flush file means sync file from disk.
|
||||
func (w *fileLogWriter) Flush() {
|
||||
w.fileWriter.Sync()
|
||||
}
|
||||
|
||||
func init() {
|
||||
Register(AdapterFile, newFileWriter)
|
||||
}
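The file adapter deleted above accepts its rotation policy as a JSON string (see the Init doc comment in file.go). A hedged example of wiring it up, with placeholder path and limits:

package main

import (
	"github.com/fatedier/beego/logs"
)

func main() {
	log := logs.NewLogger(10000)

	// Keys mirror the fileLogWriter JSON tags: rotate by line count, by size
	// in bytes or daily, keep at most 15 days of old files, and create new
	// log files with mode 0600.
	err := log.SetLogger("file", `{
		"filename": "logs/frps.log",
		"maxlines": 10000,
		"maxsize":  1048576,
		"daily":    true,
		"maxdays":  15,
		"rotate":   true,
		"perm":     "0600"
	}`)
	if err != nil {
		panic(err)
	}

	log.Informational("file logger ready")
}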
78 vendor/github.com/fatedier/beego/logs/jianliao.go generated vendored
@@ -1,78 +0,0 @@
|
||||
package logs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
// JLWriter implements beego LoggerInterface and is used to send jiaoliao webhook
|
||||
type JLWriter struct {
|
||||
AuthorName string `json:"authorname"`
|
||||
Title string `json:"title"`
|
||||
WebhookURL string `json:"webhookurl"`
|
||||
RedirectURL string `json:"redirecturl,omitempty"`
|
||||
ImageURL string `json:"imageurl,omitempty"`
|
||||
Level int `json:"level"`
|
||||
}
|
||||
|
||||
// newJLWriter create jiaoliao writer.
|
||||
func newJLWriter() Logger {
|
||||
return &JLWriter{Level: LevelTrace}
|
||||
}
|
||||
|
||||
// Init JLWriter with json config string
|
||||
func (s *JLWriter) Init(jsonconfig string) error {
|
||||
err := json.Unmarshal([]byte(jsonconfig), s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteMsg write message in smtp writer.
|
||||
// it will send an email with subject and only this message.
|
||||
func (s *JLWriter) WriteMsg(when time.Time, msg string, level int) error {
|
||||
if level > s.Level {
|
||||
return nil
|
||||
}
|
||||
|
||||
text := fmt.Sprintf("%s %s", when.Format("2006-01-02 15:04:05"), msg)
|
||||
|
||||
form := url.Values{}
|
||||
form.Add("authorName", s.AuthorName)
|
||||
form.Add("title", s.Title)
|
||||
form.Add("text", text)
|
||||
if s.RedirectURL != "" {
|
||||
form.Add("redirectUrl", s.RedirectURL)
|
||||
}
|
||||
if s.ImageURL != "" {
|
||||
form.Add("imageUrl", s.ImageURL)
|
||||
}
|
||||
|
||||
resp, err := http.PostForm(s.WebhookURL, form)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush implementing method. empty.
|
||||
func (s *JLWriter) Flush() {
|
||||
return
|
||||
}
|
||||
|
||||
// Destroy implementing method. empty.
|
||||
func (s *JLWriter) Destroy() {
|
||||
return
|
||||
}
|
||||
|
||||
func init() {
|
||||
Register(AdapterJianLiao, newJLWriter)
|
||||
}
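For completeness, the jianliao webhook writer removed above is registered under the adapter name "jianliao" and configured through the JLWriter JSON tags. A sketch with placeholder webhook values:

package main

import (
	"github.com/fatedier/beego/logs"
)

func main() {
	log := logs.NewLogger(1000)

	// authorname, title and webhookurl map to the JLWriter struct tags above;
	// the URL here is a placeholder, not a real endpoint.
	log.SetLogger("jianliao", `{
		"authorname": "frps",
		"title":      "frps alert",
		"webhookurl": "https://example.com/hook",
		"level":      3
	}`)

	// Only messages whose level is numerically <= the configured level
	// (3 = LevelError) are posted to the webhook.
	log.Error("tunnel down")
}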
657 vendor/github.com/fatedier/beego/logs/log.go generated vendored
@@ -1,657 +0,0 @@
|
||||
// Copyright 2012 beego Author. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package logs provide a general log interface
|
||||
// Usage:
|
||||
//
|
||||
// import "github.com/astaxie/beego/logs"
|
||||
//
|
||||
// log := NewLogger(10000)
|
||||
// log.SetLogger("console", "")
|
||||
//
|
||||
// > the first params stand for how many channel
|
||||
//
|
||||
// Use it like this:
|
||||
//
|
||||
// log.Trace("trace")
|
||||
// log.Info("info")
|
||||
// log.Warn("warning")
|
||||
// log.Debug("debug")
|
||||
// log.Critical("critical")
|
||||
//
|
||||
// more docs http://beego.me/docs/module/logs.md
|
||||
package logs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// RFC5424 log message levels.
|
||||
const (
|
||||
LevelEmergency = iota
|
||||
LevelAlert
|
||||
LevelCritical
|
||||
LevelError
|
||||
LevelWarning
|
||||
LevelNotice
|
||||
LevelInformational
|
||||
LevelDebug
|
||||
LevelTrace
|
||||
)
|
||||
|
||||
// levelLogLogger is defined to implement log.Logger
|
||||
// the real log level will be LevelEmergency
|
||||
const levelLoggerImpl = -1
|
||||
|
||||
// Name for adapter with beego official support
|
||||
const (
|
||||
AdapterConsole = "console"
|
||||
AdapterFile = "file"
|
||||
AdapterMultiFile = "multifile"
|
||||
AdapterMail = "smtp"
|
||||
AdapterConn = "conn"
|
||||
AdapterEs = "es"
|
||||
AdapterJianLiao = "jianliao"
|
||||
AdapterSlack = "slack"
|
||||
AdapterAliLS = "alils"
|
||||
)
|
||||
|
||||
// Legacy log level constants to ensure backwards compatibility.
|
||||
const (
|
||||
LevelInfo = LevelInformational
|
||||
LevelWarn = LevelWarning
|
||||
)
|
||||
|
||||
type newLoggerFunc func() Logger
|
||||
|
||||
// Logger defines the behavior of a log provider.
|
||||
type Logger interface {
|
||||
Init(config string) error
|
||||
WriteMsg(when time.Time, msg string, level int) error
|
||||
Destroy()
|
||||
Flush()
|
||||
}
|
||||
|
||||
var adapters = make(map[string]newLoggerFunc)
|
||||
var levelPrefix = [LevelTrace + 1]string{"[M] ", "[A] ", "[C] ", "[E] ", "[W] ", "[N] ", "[I] ", "[D] ", "[T] "}
|
||||
|
||||
// Register makes a log provide available by the provided name.
|
||||
// If Register is called twice with the same name or if driver is nil,
|
||||
// it panics.
|
||||
func Register(name string, log newLoggerFunc) {
|
||||
if log == nil {
|
||||
panic("logs: Register provide is nil")
|
||||
}
|
||||
if _, dup := adapters[name]; dup {
|
||||
panic("logs: Register called twice for provider " + name)
|
||||
}
|
||||
adapters[name] = log
|
||||
}
|
||||
|
||||
// BeeLogger is default logger in beego application.
|
||||
// it can contain several providers and log message into all providers.
|
||||
type BeeLogger struct {
|
||||
lock sync.Mutex
|
||||
level int
|
||||
init bool
|
||||
enableFuncCallDepth bool
|
||||
loggerFuncCallDepth int
|
||||
asynchronous bool
|
||||
msgChanLen int64
|
||||
msgChan chan *logMsg
|
||||
signalChan chan string
|
||||
wg sync.WaitGroup
|
||||
outputs []*nameLogger
|
||||
}
|
||||
|
||||
const defaultAsyncMsgLen = 1e3
|
||||
|
||||
type nameLogger struct {
|
||||
Logger
|
||||
name string
|
||||
}
|
||||
|
||||
type logMsg struct {
|
||||
level int
|
||||
msg string
|
||||
when time.Time
|
||||
}
|
||||
|
||||
var logMsgPool *sync.Pool
|
||||
|
||||
// NewLogger returns a new BeeLogger.
|
||||
// channelLen means the number of messages in chan(used where asynchronous is true).
|
||||
// if the buffering chan is full, logger adapters write to file or other way.
|
||||
func NewLogger(channelLens ...int64) *BeeLogger {
|
||||
bl := new(BeeLogger)
|
||||
bl.level = LevelDebug
|
||||
bl.loggerFuncCallDepth = 2
|
||||
bl.msgChanLen = append(channelLens, 0)[0]
|
||||
if bl.msgChanLen <= 0 {
|
||||
bl.msgChanLen = defaultAsyncMsgLen
|
||||
}
|
||||
bl.signalChan = make(chan string, 1)
|
||||
bl.setLogger(AdapterConsole)
|
||||
return bl
|
||||
}
|
||||
|
||||
// Async set the log to asynchronous and start the goroutine
|
||||
func (bl *BeeLogger) Async(msgLen ...int64) *BeeLogger {
|
||||
bl.lock.Lock()
|
||||
defer bl.lock.Unlock()
|
||||
if bl.asynchronous {
|
||||
return bl
|
||||
}
|
||||
bl.asynchronous = true
|
||||
if len(msgLen) > 0 && msgLen[0] > 0 {
|
||||
bl.msgChanLen = msgLen[0]
|
||||
}
|
||||
bl.msgChan = make(chan *logMsg, bl.msgChanLen)
|
||||
logMsgPool = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
return &logMsg{}
|
||||
},
|
||||
}
|
||||
bl.wg.Add(1)
|
||||
go bl.startLogger()
|
||||
return bl
|
||||
}
|
||||
|
||||
// SetLogger provides a given logger adapter into BeeLogger with config string.
|
||||
// config need to be correct JSON as string: {"interval":360}.
|
||||
func (bl *BeeLogger) setLogger(adapterName string, configs ...string) error {
|
||||
config := append(configs, "{}")[0]
|
||||
for _, l := range bl.outputs {
|
||||
if l.name == adapterName {
|
||||
return fmt.Errorf("logs: duplicate adaptername %q (you have set this logger before)", adapterName)
|
||||
}
|
||||
}
|
||||
|
||||
log, ok := adapters[adapterName]
|
||||
if !ok {
|
||||
return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
|
||||
}
|
||||
|
||||
lg := log()
|
||||
err := lg.Init(config)
|
||||
if err != nil {
|
||||
fmt.Fprintln(os.Stderr, "logs.BeeLogger.SetLogger: "+err.Error())
|
||||
return err
|
||||
}
|
||||
bl.outputs = append(bl.outputs, &nameLogger{name: adapterName, Logger: lg})
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetLogger provides a given logger adapter into BeeLogger with config string.
|
||||
// config need to be correct JSON as string: {"interval":360}.
|
||||
func (bl *BeeLogger) SetLogger(adapterName string, configs ...string) error {
|
||||
bl.lock.Lock()
|
||||
defer bl.lock.Unlock()
|
||||
if !bl.init {
|
||||
bl.outputs = []*nameLogger{}
|
||||
bl.init = true
|
||||
}
|
||||
return bl.setLogger(adapterName, configs...)
|
||||
}
|
||||
|
||||
// DelLogger remove a logger adapter in BeeLogger.
|
||||
func (bl *BeeLogger) DelLogger(adapterName string) error {
|
||||
bl.lock.Lock()
|
||||
defer bl.lock.Unlock()
|
||||
outputs := []*nameLogger{}
|
||||
for _, lg := range bl.outputs {
|
||||
if lg.name == adapterName {
|
||||
lg.Destroy()
|
||||
} else {
|
||||
outputs = append(outputs, lg)
|
||||
}
|
||||
}
|
||||
if len(outputs) == len(bl.outputs) {
|
||||
return fmt.Errorf("logs: unknown adaptername %q (forgotten Register?)", adapterName)
|
||||
}
|
||||
bl.outputs = outputs
|
||||
return nil
|
||||
}
|
||||
|
||||
func (bl *BeeLogger) writeToLoggers(when time.Time, msg string, level int) {
|
||||
for _, l := range bl.outputs {
|
||||
err := l.WriteMsg(when, msg, level)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "unable to WriteMsg to adapter:%v,error:%v\n", l.name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bl *BeeLogger) Write(p []byte) (n int, err error) {
|
||||
if len(p) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
// writeMsg will always add a '\n' character
|
||||
if p[len(p)-1] == '\n' {
|
||||
p = p[0 : len(p)-1]
|
||||
}
|
||||
// set levelLoggerImpl to ensure all log message will be write out
|
||||
err = bl.writeMsg(levelLoggerImpl, string(p))
|
||||
if err == nil {
|
||||
return len(p), err
|
||||
}
|
||||
return 0, err
|
||||
}
|
||||
|
||||
func (bl *BeeLogger) writeMsg(logLevel int, msg string, v ...interface{}) error {
	if !bl.init {
		bl.lock.Lock()
		bl.setLogger(AdapterConsole)
		bl.lock.Unlock()
	}

	if len(v) > 0 {
		msg = fmt.Sprintf(msg, v...)
	}
	when := time.Now()
	if bl.enableFuncCallDepth {
		_, file, line, ok := runtime.Caller(bl.loggerFuncCallDepth)
		if !ok {
			file = "???"
			line = 0
		} else {
			if strings.Contains(file, "<autogenerated>") {
				_, file, line, ok = runtime.Caller(bl.loggerFuncCallDepth + 1)
				if !ok {
					file = "???"
					line = 0
				}
			}
		}
		_, filename := path.Split(file)
		msg = "[" + filename + ":" + strconv.FormatInt(int64(line), 10) + "] " + msg
	}

	//set level info in front of filename info
	if logLevel == levelLoggerImpl {
		// set to emergency to ensure all log will be print out correctly
		logLevel = LevelEmergency
	} else {
		msg = levelPrefix[logLevel] + msg
	}

	if bl.asynchronous {
		lm := logMsgPool.Get().(*logMsg)
		lm.level = logLevel
		lm.msg = msg
		lm.when = when
		bl.msgChan <- lm
	} else {
		bl.writeToLoggers(when, msg, logLevel)
	}
	return nil
}

// SetLevel Set log message level.
// If message level (such as LevelDebug) is higher than logger level (such as LevelWarning),
// log providers will not even be sent the message.
func (bl *BeeLogger) SetLevel(l int) {
	bl.level = l
}

// SetLogFuncCallDepth set log funcCallDepth
func (bl *BeeLogger) SetLogFuncCallDepth(d int) {
	bl.loggerFuncCallDepth = d
}

// GetLogFuncCallDepth return log funcCallDepth for wrapper
func (bl *BeeLogger) GetLogFuncCallDepth() int {
	return bl.loggerFuncCallDepth
}

// EnableFuncCallDepth enable log funcCallDepth
func (bl *BeeLogger) EnableFuncCallDepth(b bool) {
	bl.enableFuncCallDepth = b
}

// start logger chan reading.
// when chan is not empty, write logs.
func (bl *BeeLogger) startLogger() {
	gameOver := false
	for {
		select {
		case bm := <-bl.msgChan:
			bl.writeToLoggers(bm.when, bm.msg, bm.level)
			logMsgPool.Put(bm)
		case sg := <-bl.signalChan:
			// Now should only send "flush" or "close" to bl.signalChan
			bl.flush()
			if sg == "close" {
				for _, l := range bl.outputs {
					l.Destroy()
				}
				bl.outputs = nil
				gameOver = true
			}
			bl.wg.Done()
		}
		if gameOver {
			break
		}
	}
}

// Emergency Log EMERGENCY level message.
func (bl *BeeLogger) Emergency(format string, v ...interface{}) {
	if LevelEmergency > bl.level {
		return
	}
	bl.writeMsg(LevelEmergency, format, v...)
}

// Alert Log ALERT level message.
func (bl *BeeLogger) Alert(format string, v ...interface{}) {
	if LevelAlert > bl.level {
		return
	}
	bl.writeMsg(LevelAlert, format, v...)
}

// Critical Log CRITICAL level message.
func (bl *BeeLogger) Critical(format string, v ...interface{}) {
	if LevelCritical > bl.level {
		return
	}
	bl.writeMsg(LevelCritical, format, v...)
}

// Error Log ERROR level message.
func (bl *BeeLogger) Error(format string, v ...interface{}) {
	if LevelError > bl.level {
		return
	}
	bl.writeMsg(LevelError, format, v...)
}

// Warning Log WARNING level message.
func (bl *BeeLogger) Warning(format string, v ...interface{}) {
	if LevelWarn > bl.level {
		return
	}
	bl.writeMsg(LevelWarn, format, v...)
}

// Notice Log NOTICE level message.
func (bl *BeeLogger) Notice(format string, v ...interface{}) {
	if LevelNotice > bl.level {
		return
	}
	bl.writeMsg(LevelNotice, format, v...)
}

// Informational Log INFORMATIONAL level message.
func (bl *BeeLogger) Informational(format string, v ...interface{}) {
	if LevelInfo > bl.level {
		return
	}
	bl.writeMsg(LevelInfo, format, v...)
}

// Debug Log DEBUG level message.
func (bl *BeeLogger) Debug(format string, v ...interface{}) {
	if LevelDebug > bl.level {
		return
	}
	bl.writeMsg(LevelDebug, format, v...)
}

// Warn Log WARN level message.
// compatibility alias for Warning()
func (bl *BeeLogger) Warn(format string, v ...interface{}) {
	if LevelWarn > bl.level {
		return
	}
	bl.writeMsg(LevelWarn, format, v...)
}

// Info Log INFO level message.
// compatibility alias for Informational()
func (bl *BeeLogger) Info(format string, v ...interface{}) {
	if LevelInfo > bl.level {
		return
	}
	bl.writeMsg(LevelInfo, format, v...)
}

// Trace Log TRACE level message.
// compatibility alias for Debug()
func (bl *BeeLogger) Trace(format string, v ...interface{}) {
	if LevelTrace > bl.level {
		return
	}
	bl.writeMsg(LevelTrace, format, v...)
}

// Flush flush all chan data.
func (bl *BeeLogger) Flush() {
	if bl.asynchronous {
		bl.signalChan <- "flush"
		bl.wg.Wait()
		bl.wg.Add(1)
		return
	}
	bl.flush()
}

// Close close logger, flush all chan data and destroy all adapters in BeeLogger.
func (bl *BeeLogger) Close() {
	if bl.asynchronous {
		bl.signalChan <- "close"
		bl.wg.Wait()
		close(bl.msgChan)
	} else {
		bl.flush()
		for _, l := range bl.outputs {
			l.Destroy()
		}
		bl.outputs = nil
	}
	close(bl.signalChan)
}

// Reset close all outputs, and set bl.outputs to nil
func (bl *BeeLogger) Reset() {
	bl.Flush()
	for _, l := range bl.outputs {
		l.Destroy()
	}
	bl.outputs = nil
}

func (bl *BeeLogger) flush() {
	if bl.asynchronous {
		for {
			if len(bl.msgChan) > 0 {
				bm := <-bl.msgChan
				bl.writeToLoggers(bm.when, bm.msg, bm.level)
				logMsgPool.Put(bm)
				continue
			}
			break
		}
	}
	for _, l := range bl.outputs {
		l.Flush()
	}
}

// beeLogger references the used application logger.
var beeLogger *BeeLogger = NewLogger()

// GetLogger returns the default BeeLogger
func GetBeeLogger() *BeeLogger {
	return beeLogger
}

var beeLoggerMap = struct {
	sync.RWMutex
	logs map[string]*log.Logger
}{
	logs: map[string]*log.Logger{},
}

// GetLogger returns the default BeeLogger
func GetLogger(prefixes ...string) *log.Logger {
	prefix := append(prefixes, "")[0]
	if prefix != "" {
		prefix = fmt.Sprintf(`[%s] `, strings.ToUpper(prefix))
	}
	beeLoggerMap.RLock()
	l, ok := beeLoggerMap.logs[prefix]
	if ok {
		beeLoggerMap.RUnlock()
		return l
	}
	beeLoggerMap.RUnlock()
	beeLoggerMap.Lock()
	defer beeLoggerMap.Unlock()
	l, ok = beeLoggerMap.logs[prefix]
	if !ok {
		l = log.New(beeLogger, prefix, 0)
		beeLoggerMap.logs[prefix] = l
	}
	return l
}

// Reset will remove all the adapter
func Reset() {
	beeLogger.Reset()
}

func Async(msgLen ...int64) *BeeLogger {
	return beeLogger.Async(msgLen...)
}

// SetLevel sets the global log level used by the simple logger.
func SetLevel(l int) {
	beeLogger.SetLevel(l)
}

// EnableFuncCallDepth enable log funcCallDepth
func EnableFuncCallDepth(b bool) {
	beeLogger.enableFuncCallDepth = b
}

// SetLogFuncCall set the CallDepth, default is 4
func SetLogFuncCall(b bool) {
	beeLogger.EnableFuncCallDepth(b)
	beeLogger.SetLogFuncCallDepth(4)
}

// SetLogFuncCallDepth set log funcCallDepth
func SetLogFuncCallDepth(d int) {
	beeLogger.loggerFuncCallDepth = d
}

// SetLogger sets a new logger.
func SetLogger(adapter string, config ...string) error {
	err := beeLogger.SetLogger(adapter, config...)
	if err != nil {
		return err
	}
	return nil
}

// Emergency logs a message at emergency level.
func Emergency(f interface{}, v ...interface{}) {
	beeLogger.Emergency(formatLog(f, v...))
}

// Alert logs a message at alert level.
func Alert(f interface{}, v ...interface{}) {
	beeLogger.Alert(formatLog(f, v...))
}

// Critical logs a message at critical level.
func Critical(f interface{}, v ...interface{}) {
	beeLogger.Critical(formatLog(f, v...))
}

// Error logs a message at error level.
func Error(f interface{}, v ...interface{}) {
	beeLogger.Error(formatLog(f, v...))
}

// Warning logs a message at warning level.
func Warning(f interface{}, v ...interface{}) {
	beeLogger.Warn(formatLog(f, v...))
}

// Warn compatibility alias for Warning()
func Warn(f interface{}, v ...interface{}) {
	beeLogger.Warn(formatLog(f, v...))
}

// Notice logs a message at notice level.
func Notice(f interface{}, v ...interface{}) {
	beeLogger.Notice(formatLog(f, v...))
}

// Informational logs a message at info level.
func Informational(f interface{}, v ...interface{}) {
	beeLogger.Info(formatLog(f, v...))
}

// Info compatibility alias for Warning()
func Info(f interface{}, v ...interface{}) {
	beeLogger.Info(formatLog(f, v...))
}

// Debug logs a message at debug level.
func Debug(f interface{}, v ...interface{}) {
	beeLogger.Debug(formatLog(f, v...))
}

// Trace logs a message at trace level.
// compatibility alias for Warning()
func Trace(f interface{}, v ...interface{}) {
	beeLogger.Trace(formatLog(f, v...))
}

func formatLog(f interface{}, v ...interface{}) string {
	var msg string
	switch f.(type) {
	case string:
		msg = f.(string)
		if len(v) == 0 {
			return msg
		}
		if strings.Contains(msg, "%") && !strings.Contains(msg, "%%") {
			//format string
		} else {
			//do not contain format char
			msg += strings.Repeat(" %v", len(v))
		}
	default:
		msg = fmt.Sprint(f)
		if len(v) == 0 {
			return msg
		}
		msg += strings.Repeat(" %v", len(v))
	}
	return fmt.Sprintf(msg, v...)
}
188 vendor/github.com/fatedier/beego/logs/logger.go generated vendored
@@ -1,188 +0,0 @@
|
||||
// Copyright 2014 beego Author. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type logWriter struct {
|
||||
sync.Mutex
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
func newLogWriter(wr io.Writer) *logWriter {
|
||||
return &logWriter{writer: wr}
|
||||
}
|
||||
|
||||
func (lg *logWriter) println(when time.Time, msg string) {
|
||||
lg.Lock()
|
||||
h, _ := formatTimeHeader(when)
|
||||
lg.writer.Write(append(append(h, msg...), '\n'))
|
||||
lg.Unlock()
|
||||
}
|
||||
|
||||
type outputMode int
|
||||
|
||||
// DiscardNonColorEscSeq supports the divided color escape sequence.
|
||||
// But non-color escape sequence is not output.
|
||||
// Please use the OutputNonColorEscSeq If you want to output a non-color
|
||||
// escape sequences such as ncurses. However, it does not support the divided
|
||||
// color escape sequence.
|
||||
const (
|
||||
_ outputMode = iota
|
||||
DiscardNonColorEscSeq
|
||||
OutputNonColorEscSeq
|
||||
)
|
||||
|
||||
// NewAnsiColorWriter creates and initializes a new ansiColorWriter
|
||||
// using io.Writer w as its initial contents.
|
||||
// In the console of Windows, which change the foreground and background
|
||||
// colors of the text by the escape sequence.
|
||||
// In the console of other systems, which writes to w all text.
|
||||
func NewAnsiColorWriter(w io.Writer) io.Writer {
|
||||
return NewModeAnsiColorWriter(w, DiscardNonColorEscSeq)
|
||||
}
|
||||
|
||||
// NewModeAnsiColorWriter create and initializes a new ansiColorWriter
|
||||
// by specifying the outputMode.
|
||||
func NewModeAnsiColorWriter(w io.Writer, mode outputMode) io.Writer {
|
||||
if _, ok := w.(*ansiColorWriter); !ok {
|
||||
return &ansiColorWriter{
|
||||
w: w,
|
||||
mode: mode,
|
||||
}
|
||||
}
|
||||
return w
|
||||
}
|
||||
|
||||
const (
|
||||
y1 = `0123456789`
|
||||
y2 = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
|
||||
y3 = `0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999`
|
||||
y4 = `0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789`
|
||||
mo1 = `000000000111`
|
||||
mo2 = `123456789012`
|
||||
d1 = `0000000001111111111222222222233`
|
||||
d2 = `1234567890123456789012345678901`
|
||||
h1 = `000000000011111111112222`
|
||||
h2 = `012345678901234567890123`
|
||||
mi1 = `000000000011111111112222222222333333333344444444445555555555`
|
||||
mi2 = `012345678901234567890123456789012345678901234567890123456789`
|
||||
s1 = `000000000011111111112222222222333333333344444444445555555555`
|
||||
s2 = `012345678901234567890123456789012345678901234567890123456789`
|
||||
)
|
||||
|
||||
func formatTimeHeader(when time.Time) ([]byte, int) {
|
||||
y, mo, d := when.Date()
|
||||
h, mi, s := when.Clock()
|
||||
//len("2006/01/02 15:04:05 ")==20
|
||||
var buf [20]byte
|
||||
|
||||
buf[0] = y1[y/1000%10]
|
||||
buf[1] = y2[y/100]
|
||||
buf[2] = y3[y-y/100*100]
|
||||
buf[3] = y4[y-y/100*100]
|
||||
buf[4] = '/'
|
||||
buf[5] = mo1[mo-1]
|
||||
buf[6] = mo2[mo-1]
|
||||
buf[7] = '/'
|
||||
buf[8] = d1[d-1]
|
||||
buf[9] = d2[d-1]
|
||||
buf[10] = ' '
|
||||
buf[11] = h1[h]
|
||||
buf[12] = h2[h]
|
||||
buf[13] = ':'
|
||||
buf[14] = mi1[mi]
|
||||
buf[15] = mi2[mi]
|
||||
buf[16] = ':'
|
||||
buf[17] = s1[s]
|
||||
buf[18] = s2[s]
|
||||
buf[19] = ' '
|
||||
|
||||
return buf[0:], d
|
||||
}
|
||||
|
||||
var (
|
||||
green = string([]byte{27, 91, 57, 55, 59, 52, 50, 109})
|
||||
white = string([]byte{27, 91, 57, 48, 59, 52, 55, 109})
|
||||
yellow = string([]byte{27, 91, 57, 55, 59, 52, 51, 109})
|
||||
red = string([]byte{27, 91, 57, 55, 59, 52, 49, 109})
|
||||
blue = string([]byte{27, 91, 57, 55, 59, 52, 52, 109})
|
||||
magenta = string([]byte{27, 91, 57, 55, 59, 52, 53, 109})
|
||||
cyan = string([]byte{27, 91, 57, 55, 59, 52, 54, 109})
|
||||
|
||||
w32Green = string([]byte{27, 91, 52, 50, 109})
|
||||
w32White = string([]byte{27, 91, 52, 55, 109})
|
||||
w32Yellow = string([]byte{27, 91, 52, 51, 109})
|
||||
w32Red = string([]byte{27, 91, 52, 49, 109})
|
||||
w32Blue = string([]byte{27, 91, 52, 52, 109})
|
||||
w32Magenta = string([]byte{27, 91, 52, 53, 109})
|
||||
w32Cyan = string([]byte{27, 91, 52, 54, 109})
|
||||
|
||||
reset = string([]byte{27, 91, 48, 109})
|
||||
)
|
||||
|
||||
func ColorByStatus(cond bool, code int) string {
|
||||
switch {
|
||||
case code >= 200 && code < 300:
|
||||
return map[bool]string{true: green, false: w32Green}[cond]
|
||||
case code >= 300 && code < 400:
|
||||
return map[bool]string{true: white, false: w32White}[cond]
|
||||
case code >= 400 && code < 500:
|
||||
return map[bool]string{true: yellow, false: w32Yellow}[cond]
|
||||
default:
|
||||
return map[bool]string{true: red, false: w32Red}[cond]
|
||||
}
|
||||
}
|
||||
|
||||
func ColorByMethod(cond bool, method string) string {
|
||||
switch method {
|
||||
case "GET":
|
||||
return map[bool]string{true: blue, false: w32Blue}[cond]
|
||||
case "POST":
|
||||
return map[bool]string{true: cyan, false: w32Cyan}[cond]
|
||||
case "PUT":
|
||||
return map[bool]string{true: yellow, false: w32Yellow}[cond]
|
||||
case "DELETE":
|
||||
return map[bool]string{true: red, false: w32Red}[cond]
|
||||
case "PATCH":
|
||||
return map[bool]string{true: green, false: w32Green}[cond]
|
||||
case "HEAD":
|
||||
return map[bool]string{true: magenta, false: w32Magenta}[cond]
|
||||
case "OPTIONS":
|
||||
return map[bool]string{true: white, false: w32White}[cond]
|
||||
default:
|
||||
return reset
|
||||
}
|
||||
}
|
||||
|
||||
// Guard Mutex to guarantee atomicity of W32Debug(string) function
|
||||
var mu sync.Mutex
|
||||
|
||||
// Helper method to output colored logs in Windows terminals
|
||||
func W32Debug(msg string) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
current := time.Now()
|
||||
w := NewAnsiColorWriter(os.Stdout)
|
||||
|
||||
fmt.Fprintf(w, "[beego] %v %s\n", current.Format("2006/01/02 - 15:04:05"), msg)
|
||||
}
|
116 vendor/github.com/fatedier/beego/logs/multifile.go generated vendored
@@ -1,116 +0,0 @@
|
||||
// Copyright 2014 beego Author. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A filesLogWriter manages several fileLogWriter
|
||||
// filesLogWriter will write logs to the file in json configuration and write the same level log to correspond file
|
||||
// means if the file name in configuration is project.log filesLogWriter will create project.error.log/project.debug.log
|
||||
// and write the error-level logs to project.error.log and write the debug-level logs to project.debug.log
|
||||
// the rotate attribute also acts like fileLogWriter
|
||||
type multiFileLogWriter struct {
|
||||
writers [LevelDebug + 1 + 1]*fileLogWriter // the last one for fullLogWriter
|
||||
fullLogWriter *fileLogWriter
|
||||
Separate []string `json:"separate"`
|
||||
}
|
||||
|
||||
var levelNames = [...]string{"emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"}
|
||||
|
||||
// Init file logger with json config.
|
||||
// jsonConfig like:
|
||||
// {
|
||||
// "filename":"logs/beego.log",
|
||||
// "maxLines":0,
|
||||
// "maxsize":0,
|
||||
// "daily":true,
|
||||
// "maxDays":15,
|
||||
// "rotate":true,
|
||||
// "perm":0600,
|
||||
// "separate":["emergency", "alert", "critical", "error", "warning", "notice", "info", "debug"],
|
||||
// }
|
||||
|
||||
func (f *multiFileLogWriter) Init(config string) error {
|
||||
writer := newFileWriter().(*fileLogWriter)
|
||||
err := writer.Init(config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.fullLogWriter = writer
|
||||
f.writers[LevelDebug+1] = writer
|
||||
|
||||
//unmarshal "separate" field to f.Separate
|
||||
json.Unmarshal([]byte(config), f)
|
||||
|
||||
jsonMap := map[string]interface{}{}
|
||||
json.Unmarshal([]byte(config), &jsonMap)
|
||||
|
||||
for i := LevelEmergency; i < LevelDebug+1; i++ {
|
||||
for _, v := range f.Separate {
|
||||
if v == levelNames[i] {
|
||||
jsonMap["filename"] = f.fullLogWriter.fileNameOnly + "." + levelNames[i] + f.fullLogWriter.suffix
|
||||
jsonMap["level"] = i
|
||||
bs, _ := json.Marshal(jsonMap)
|
||||
writer = newFileWriter().(*fileLogWriter)
|
||||
writer.Init(string(bs))
|
||||
f.writers[i] = writer
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *multiFileLogWriter) Destroy() {
|
||||
for i := 0; i < len(f.writers); i++ {
|
||||
if f.writers[i] != nil {
|
||||
f.writers[i].Destroy()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *multiFileLogWriter) WriteMsg(when time.Time, msg string, level int) error {
|
||||
if f.fullLogWriter != nil {
|
||||
f.fullLogWriter.WriteMsg(when, msg, level)
|
||||
}
|
||||
for i := 0; i < len(f.writers)-1; i++ {
|
||||
if f.writers[i] != nil {
|
||||
if level == f.writers[i].Level {
|
||||
f.writers[i].WriteMsg(when, msg, level)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *multiFileLogWriter) Flush() {
|
||||
for i := 0; i < len(f.writers); i++ {
|
||||
if f.writers[i] != nil {
|
||||
f.writers[i].Flush()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// newFilesWriter create a FileLogWriter returning as LoggerInterface.
|
||||
func newFilesWriter() Logger {
|
||||
return &multiFileLogWriter{}
|
||||
}
|
||||
|
||||
func init() {
|
||||
Register(AdapterMultiFile, newFilesWriter)
|
||||
}
|
66 vendor/github.com/fatedier/beego/logs/slack.go generated vendored
@@ -1,66 +0,0 @@
|
||||
package logs
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SLACKWriter implements beego LoggerInterface and is used to send jiaoliao webhook
|
||||
type SLACKWriter struct {
|
||||
WebhookURL string `json:"webhookurl"`
|
||||
Level int `json:"level"`
|
||||
}
|
||||
|
||||
// newSLACKWriter create jiaoliao writer.
|
||||
func newSLACKWriter() Logger {
|
||||
return &SLACKWriter{Level: LevelTrace}
|
||||
}
|
||||
|
||||
// Init SLACKWriter with json config string
|
||||
func (s *SLACKWriter) Init(jsonconfig string) error {
|
||||
err := json.Unmarshal([]byte(jsonconfig), s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteMsg write message in smtp writer.
|
||||
// it will send an email with subject and only this message.
|
||||
func (s *SLACKWriter) WriteMsg(when time.Time, msg string, level int) error {
|
||||
if level > s.Level {
|
||||
return nil
|
||||
}
|
||||
|
||||
text := fmt.Sprintf("{\"text\": \"%s %s\"}", when.Format("2006-01-02 15:04:05"), msg)
|
||||
|
||||
form := url.Values{}
|
||||
form.Add("payload", text)
|
||||
|
||||
resp, err := http.PostForm(s.WebhookURL, form)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return fmt.Errorf("Post webhook failed %s %d", resp.Status, resp.StatusCode)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flush implementing method. empty.
|
||||
func (s *SLACKWriter) Flush() {
|
||||
return
|
||||
}
|
||||
|
||||
// Destroy implementing method. empty.
|
||||
func (s *SLACKWriter) Destroy() {
|
||||
return
|
||||
}
|
||||
|
||||
func init() {
|
||||
Register(AdapterSlack, newSLACKWriter)
|
||||
}
|
160 vendor/github.com/fatedier/beego/logs/smtp.go generated vendored
@@ -1,160 +0,0 @@
|
||||
// Copyright 2014 beego Author. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package logs
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/smtp"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SMTPWriter implements LoggerInterface and is used to send emails via given SMTP-server.
|
||||
type SMTPWriter struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
Host string `json:"host"`
|
||||
Subject string `json:"subject"`
|
||||
FromAddress string `json:"fromAddress"`
|
||||
RecipientAddresses []string `json:"sendTos"`
|
||||
Level int `json:"level"`
|
||||
}
|
||||
|
||||
// NewSMTPWriter create smtp writer.
|
||||
func newSMTPWriter() Logger {
|
||||
return &SMTPWriter{Level: LevelTrace}
|
||||
}
|
||||
|
||||
// Init smtp writer with json config.
|
||||
// config like:
|
||||
// {
|
||||
// "username":"example@gmail.com",
|
||||
// "password:"password",
|
||||
// "host":"smtp.gmail.com:465",
|
||||
// "subject":"email title",
|
||||
// "fromAddress":"from@example.com",
|
||||
// "sendTos":["email1","email2"],
|
||||
// "level":LevelError
|
||||
// }
|
||||
func (s *SMTPWriter) Init(jsonconfig string) error {
|
||||
err := json.Unmarshal([]byte(jsonconfig), s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SMTPWriter) getSMTPAuth(host string) smtp.Auth {
|
||||
if len(strings.Trim(s.Username, " ")) == 0 && len(strings.Trim(s.Password, " ")) == 0 {
|
||||
return nil
|
||||
}
|
||||
return smtp.PlainAuth(
|
||||
"",
|
||||
s.Username,
|
||||
s.Password,
|
||||
host,
|
||||
)
|
||||
}
|
||||
|
||||
func (s *SMTPWriter) sendMail(hostAddressWithPort string, auth smtp.Auth, fromAddress string, recipients []string, msgContent []byte) error {
|
||||
client, err := smtp.Dial(hostAddressWithPort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
host, _, _ := net.SplitHostPort(hostAddressWithPort)
|
||||
tlsConn := &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
ServerName: host,
|
||||
}
|
||||
if err = client.StartTLS(tlsConn); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if auth != nil {
|
||||
if err = client.Auth(auth); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err = client.Mail(fromAddress); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, rec := range recipients {
|
||||
if err = client.Rcpt(rec); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
w, err := client.Data()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write([]byte(msgContent))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = w.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = client.Quit()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// WriteMsg write message in smtp writer.
|
||||
// it will send an email with subject and only this message.
|
||||
func (s *SMTPWriter) WriteMsg(when time.Time, msg string, level int) error {
|
||||
if level > s.Level {
|
||||
return nil
|
||||
}
|
||||
|
||||
hp := strings.Split(s.Host, ":")
|
||||
|
||||
// Set up authentication information.
|
||||
auth := s.getSMTPAuth(hp[0])
|
||||
|
||||
// Connect to the server, authenticate, set the sender and recipient,
|
||||
// and send the email all in one step.
|
||||
contentType := "Content-Type: text/plain" + "; charset=UTF-8"
|
||||
mailmsg := []byte("To: " + strings.Join(s.RecipientAddresses, ";") + "\r\nFrom: " + s.FromAddress + "<" + s.FromAddress +
|
||||
">\r\nSubject: " + s.Subject + "\r\n" + contentType + "\r\n\r\n" + fmt.Sprintf(".%s", when.Format("2006-01-02 15:04:05")) + msg)
|
||||
|
||||
return s.sendMail(s.Host, auth, s.FromAddress, s.RecipientAddresses, mailmsg)
|
||||
}
|
||||
|
||||
// Flush implementing method. empty.
|
||||
func (s *SMTPWriter) Flush() {
|
||||
return
|
||||
}
|
||||
|
||||
// Destroy implementing method. empty.
|
||||
func (s *SMTPWriter) Destroy() {
|
||||
return
|
||||
}
|
||||
|
||||
func init() {
|
||||
Register(AdapterMail, newSMTPWriter)
|
||||
}
|
201 vendor/github.com/fatedier/golib/LICENSE generated vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
62 vendor/github.com/fatedier/golib/control/shutdown/shutdown.go generated vendored
@@ -1,62 +0,0 @@
|
||||
// Copyright 2017 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package shutdown
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Shutdown struct {
|
||||
doing bool
|
||||
ending bool
|
||||
startCh chan struct{}
|
||||
doneCh chan struct{}
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
func New() *Shutdown {
|
||||
return &Shutdown{
|
||||
doing: false,
|
||||
ending: false,
|
||||
startCh: make(chan struct{}),
|
||||
doneCh: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Shutdown) Start() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if !s.doing {
|
||||
s.doing = true
|
||||
close(s.startCh)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Shutdown) WaitStart() {
|
||||
<-s.startCh
|
||||
}
|
||||
|
||||
func (s *Shutdown) Done() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if !s.ending {
|
||||
s.ending = true
|
||||
close(s.doneCh)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Shutdown) WaitDone() {
|
||||
<-s.doneCh
|
||||
}
|
75 vendor/github.com/fatedier/golib/crypto/decode.go generated vendored
@@ -1,75 +0,0 @@
|
||||
// Copyright 2017 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/sha1"
|
||||
"io"
|
||||
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
)
|
||||
|
||||
// NewReader returns a new Reader that decrypts bytes from r
|
||||
func NewReader(r io.Reader, key []byte) *Reader {
|
||||
key = pbkdf2.Key(key, []byte(DefaultSalt), 64, aes.BlockSize, sha1.New)
|
||||
|
||||
return &Reader{
|
||||
r: r,
|
||||
key: key,
|
||||
}
|
||||
}
|
||||
|
||||
// Reader is an io.Reader that can read encrypted bytes.
|
||||
// Now it only supports aes-128-cfb.
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
dec *cipher.StreamReader
|
||||
key []byte
|
||||
iv []byte
|
||||
err error
|
||||
}
|
||||
|
||||
// Read satisfies the io.Reader interface.
|
||||
func (r *Reader) Read(p []byte) (nRet int, errRet error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
|
||||
if r.dec == nil {
|
||||
iv := make([]byte, aes.BlockSize)
|
||||
if _, errRet = io.ReadFull(r.r, iv); errRet != nil {
|
||||
return
|
||||
}
|
||||
r.iv = iv
|
||||
|
||||
block, err := aes.NewCipher(r.key)
|
||||
if err != nil {
|
||||
errRet = err
|
||||
return
|
||||
}
|
||||
r.dec = &cipher.StreamReader{
|
||||
S: cipher.NewCFBDecrypter(block, iv),
|
||||
R: r.r,
|
||||
}
|
||||
}
|
||||
|
||||
nRet, errRet = r.dec.Read(p)
|
||||
if errRet != nil {
|
||||
r.err = errRet
|
||||
}
|
||||
return
|
||||
}
|
89 vendor/github.com/fatedier/golib/crypto/encode.go generated vendored
@@ -1,89 +0,0 @@
|
||||
// Copyright 2017 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/rand"
|
||||
"crypto/sha1"
|
||||
"io"
|
||||
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
)
|
||||
|
||||
var (
|
||||
DefaultSalt = "crypto"
|
||||
)
|
||||
|
||||
// NewWriter returns a new Writer that encrypts bytes to w.
|
||||
func NewWriter(w io.Writer, key []byte) (*Writer, error) {
|
||||
key = pbkdf2.Key(key, []byte(DefaultSalt), 64, aes.BlockSize, sha1.New)
|
||||
|
||||
// random iv
|
||||
iv := make([]byte, aes.BlockSize)
|
||||
if _, err := io.ReadFull(rand.Reader, iv); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Writer{
|
||||
w: w,
|
||||
enc: &cipher.StreamWriter{
|
||||
S: cipher.NewCFBEncrypter(block, iv),
|
||||
W: w,
|
||||
},
|
||||
key: key,
|
||||
iv: iv,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Writer is an io.Writer that can write encrypted bytes.
|
||||
// Now it only support aes-128-cfb.
|
||||
type Writer struct {
|
||||
w io.Writer
|
||||
enc *cipher.StreamWriter
|
||||
key []byte
|
||||
iv []byte
|
||||
ivSend bool
|
||||
err error
|
||||
}
|
||||
|
||||
// Write satisfies the io.Writer interface.
|
||||
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
|
||||
if w.err != nil {
|
||||
return 0, w.err
|
||||
}
|
||||
|
||||
// When write is first called, iv will be written to w.w
|
||||
if !w.ivSend {
|
||||
w.ivSend = true
|
||||
_, errRet = w.w.Write(w.iv)
|
||||
if errRet != nil {
|
||||
w.err = errRet
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
nRet, errRet = w.enc.Write(p)
|
||||
if errRet != nil {
|
||||
w.err = errRet
|
||||
}
|
||||
return
|
||||
}
|
124 vendor/github.com/fatedier/golib/io/io.go generated vendored
@@ -1,124 +0,0 @@
|
||||
// Copyright 2017 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package io
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/fatedier/golib/crypto"
|
||||
"github.com/fatedier/golib/pool"
|
||||
)
|
||||
|
||||
// Join two io.ReadWriteCloser and do some operations.
|
||||
func Join(c1 io.ReadWriteCloser, c2 io.ReadWriteCloser) (inCount int64, outCount int64) {
|
||||
var wait sync.WaitGroup
|
||||
pipe := func(to io.ReadWriteCloser, from io.ReadWriteCloser, count *int64) {
|
||||
defer to.Close()
|
||||
defer from.Close()
|
||||
defer wait.Done()
|
||||
|
||||
buf := pool.GetBuf(16 * 1024)
|
||||
defer pool.PutBuf(buf)
|
||||
*count, _ = io.CopyBuffer(to, from, buf)
|
||||
}
|
||||
|
||||
wait.Add(2)
|
||||
go pipe(c1, c2, &inCount)
|
||||
go pipe(c2, c1, &outCount)
|
||||
wait.Wait()
|
||||
return
|
||||
}
|
||||
|
||||
func WithEncryption(rwc io.ReadWriteCloser, key []byte) (io.ReadWriteCloser, error) {
|
||||
w, err := crypto.NewWriter(rwc, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return WrapReadWriteCloser(crypto.NewReader(rwc, key), w, func() error {
|
||||
return rwc.Close()
|
||||
}), nil
|
||||
}
|
||||
|
||||
func WithCompression(rwc io.ReadWriteCloser) io.ReadWriteCloser {
|
||||
sr := pool.GetSnappyReader(rwc)
|
||||
sw := pool.GetSnappyWriter(rwc)
|
||||
return WrapReadWriteCloser(sr, sw, func() error {
|
||||
err := rwc.Close()
|
||||
pool.PutSnappyReader(sr)
|
||||
pool.PutSnappyWriter(sw)
|
||||
return err
|
||||
})
|
||||
}
|
||||
|
||||
type ReadWriteCloser struct {
|
||||
r io.Reader
|
||||
w io.Writer
|
||||
closeFn func() error
|
||||
|
||||
closed bool
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// closeFn will be called only once
|
||||
func WrapReadWriteCloser(r io.Reader, w io.Writer, closeFn func() error) io.ReadWriteCloser {
|
||||
return &ReadWriteCloser{
|
||||
r: r,
|
||||
w: w,
|
||||
closeFn: closeFn,
|
||||
closed: false,
|
||||
}
|
||||
}
|
||||
|
||||
func (rwc *ReadWriteCloser) Read(p []byte) (n int, err error) {
|
||||
return rwc.r.Read(p)
|
||||
}
|
||||
|
||||
func (rwc *ReadWriteCloser) Write(p []byte) (n int, err error) {
|
||||
return rwc.w.Write(p)
|
||||
}
|
||||
|
||||
func (rwc *ReadWriteCloser) Close() (errRet error) {
|
||||
rwc.mu.Lock()
|
||||
if rwc.closed {
|
||||
rwc.mu.Unlock()
|
||||
return
|
||||
}
|
||||
rwc.closed = true
|
||||
rwc.mu.Unlock()
|
||||
|
||||
var err error
|
||||
if rc, ok := rwc.r.(io.Closer); ok {
|
||||
err = rc.Close()
|
||||
if err != nil {
|
||||
errRet = err
|
||||
}
|
||||
}
|
||||
|
||||
if wc, ok := rwc.w.(io.Closer); ok {
|
||||
err = wc.Close()
|
||||
if err != nil {
|
||||
errRet = err
|
||||
}
|
||||
}
|
||||
|
||||
if rwc.closeFn != nil {
|
||||
err = rwc.closeFn()
|
||||
if err != nil {
|
||||
errRet = err
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
50 vendor/github.com/fatedier/golib/msg/json/msg.go generated vendored
@@ -1,50 +0,0 @@
|
||||
// Copyright 2018 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultMaxMsgLength int64 = 10240
|
||||
)
|
||||
|
||||
// Message wraps socket packages for communicating between frpc and frps.
|
||||
type Message interface{}
|
||||
|
||||
type MsgCtl struct {
|
||||
typeMap map[byte]reflect.Type
|
||||
typeByteMap map[reflect.Type]byte
|
||||
|
||||
maxMsgLength int64
|
||||
}
|
||||
|
||||
func NewMsgCtl() *MsgCtl {
|
||||
return &MsgCtl{
|
||||
typeMap: make(map[byte]reflect.Type),
|
||||
typeByteMap: make(map[reflect.Type]byte),
|
||||
maxMsgLength: defaultMaxMsgLength,
|
||||
}
|
||||
}
|
||||
|
||||
func (msgCtl *MsgCtl) RegisterMsg(typeByte byte, msg interface{}) {
|
||||
msgCtl.typeMap[typeByte] = reflect.TypeOf(msg)
|
||||
msgCtl.typeByteMap[reflect.TypeOf(msg)] = typeByte
|
||||
}
|
||||
|
||||
func (msgCtl *MsgCtl) SetMaxMsgLength(length int64) {
|
||||
msgCtl.maxMsgLength = length
|
||||
}
|
66 vendor/github.com/fatedier/golib/msg/json/pack.go generated vendored
@@ -1,66 +0,0 @@
|
||||
// Copyright 2018 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
func (msgCtl *MsgCtl) unpack(typeByte byte, buffer []byte, msgIn Message) (msg Message, err error) {
|
||||
if msgIn == nil {
|
||||
t, ok := msgCtl.typeMap[typeByte]
|
||||
if !ok {
|
||||
err = ErrMsgType
|
||||
return
|
||||
}
|
||||
|
||||
msg = reflect.New(t).Interface().(Message)
|
||||
} else {
|
||||
msg = msgIn
|
||||
}
|
||||
|
||||
err = json.Unmarshal(buffer, &msg)
|
||||
return
|
||||
}
|
||||
|
||||
func (msgCtl *MsgCtl) UnPackInto(buffer []byte, msg Message) (err error) {
|
||||
_, err = msgCtl.unpack(' ', buffer, msg)
|
||||
return
|
||||
}
|
||||
|
||||
func (msgCtl *MsgCtl) UnPack(typeByte byte, buffer []byte) (msg Message, err error) {
|
||||
return msgCtl.unpack(typeByte, buffer, nil)
|
||||
}
|
||||
|
||||
func (msgCtl *MsgCtl) Pack(msg Message) ([]byte, error) {
|
||||
typeByte, ok := msgCtl.typeByteMap[reflect.TypeOf(msg).Elem()]
|
||||
if !ok {
|
||||
return nil, ErrMsgType
|
||||
}
|
||||
|
||||
content, err := json.Marshal(msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buffer := bytes.NewBuffer(nil)
|
||||
buffer.WriteByte(typeByte)
|
||||
binary.Write(buffer, binary.BigEndian, int64(len(content)))
|
||||
buffer.Write(content)
|
||||
return buffer.Bytes(), nil
|
||||
}
|
93 vendor/github.com/fatedier/golib/msg/json/process.go generated vendored
@@ -1,93 +0,0 @@
|
||||
// Copyright 2018 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package json
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrMsgType = errors.New("message type error")
|
||||
ErrMaxMsgLength = errors.New("message length exceed the limit")
|
||||
ErrMsgLength = errors.New("message length error")
|
||||
ErrMsgFormat = errors.New("message format error")
|
||||
)
|
||||
|
||||
func (msgCtl *MsgCtl) readMsg(c io.Reader) (typeByte byte, buffer []byte, err error) {
|
||||
buffer = make([]byte, 1)
|
||||
_, err = c.Read(buffer)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
typeByte = buffer[0]
|
||||
if _, ok := msgCtl.typeMap[typeByte]; !ok {
|
||||
err = ErrMsgType
|
||||
return
|
||||
}
|
||||
|
||||
var length int64
|
||||
err = binary.Read(c, binary.BigEndian, &length)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if length > msgCtl.maxMsgLength {
|
||||
err = ErrMaxMsgLength
|
||||
return
|
||||
} else if length < 0 {
|
||||
err = ErrMsgLength
|
||||
return
|
||||
}
|
||||
|
||||
buffer = make([]byte, length)
|
||||
n, err := io.ReadFull(c, buffer)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if int64(n) != length {
|
||||
err = ErrMsgFormat
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (msgCtl *MsgCtl) ReadMsg(c io.Reader) (msg Message, err error) {
|
||||
typeByte, buffer, err := msgCtl.readMsg(c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return msgCtl.UnPack(typeByte, buffer)
|
||||
}
|
||||
|
||||
func (msgCtl *MsgCtl) ReadMsgInto(c io.Reader, msg Message) (err error) {
|
||||
_, buffer, err := msgCtl.readMsg(c)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return msgCtl.UnPackInto(buffer, msg)
|
||||
}
|
||||
|
||||
func (msgCtl *MsgCtl) WriteMsg(c io.Writer, msg interface{}) (err error) {
|
||||
buffer, err := msgCtl.Pack(msg)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = c.Write(buffer); err != nil {
|
||||
return
|
||||
}
|
||||
return nil
|
||||
}
|
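For reference, the wire format implemented by `Pack` and `readMsg` above is simply one type byte, an 8-byte big-endian length, and a JSON body. A minimal standalone sketch of the same framing (the `Ping` type and the `'p'` type byte are made up for illustration):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"encoding/json"
	"fmt"
)

type Ping struct {
	Ts int64 `json:"ts"`
}

// frame lays out one message the same way Pack above does:
// type byte + int64 big-endian length + JSON content.
func frame(typeByte byte, msg interface{}) ([]byte, error) {
	body, err := json.Marshal(msg)
	if err != nil {
		return nil, err
	}
	buf := bytes.NewBuffer(nil)
	buf.WriteByte(typeByte)
	binary.Write(buf, binary.BigEndian, int64(len(body)))
	buf.Write(body)
	return buf.Bytes(), nil
}

func main() {
	b, _ := frame('p', Ping{Ts: 1})
	fmt.Printf("%% x\n", b) // type byte, 8-byte length, then {"ts":1}
}
```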
64
vendor/github.com/fatedier/golib/net/conn.go
generated
vendored
64
vendor/github.com/fatedier/golib/net/conn.go
generated
vendored
@ -1,64 +0,0 @@
|
||||
// Copyright 2018 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package net
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"net"
|
||||
)
|
||||
|
||||
type SharedConn struct {
|
||||
net.Conn
|
||||
buf *bytes.Buffer
|
||||
}
|
||||
|
||||
// NewSharedConn returns a SharedConn and an io.Reader; bytes read from the reader are kept in the SharedConn so they can be read again later.
|
||||
func NewSharedConn(conn net.Conn) (*SharedConn, io.Reader) {
|
||||
sc := &SharedConn{
|
||||
Conn: conn,
|
||||
buf: bytes.NewBuffer(make([]byte, 0, 1024)),
|
||||
}
|
||||
return sc, io.TeeReader(conn, sc.buf)
|
||||
}
|
||||
|
||||
func NewSharedConnSize(conn net.Conn, bufSize int) (*SharedConn, io.Reader) {
|
||||
sc := &SharedConn{
|
||||
Conn: conn,
|
||||
buf: bytes.NewBuffer(make([]byte, 0, bufSize)),
|
||||
}
|
||||
return sc, io.TeeReader(conn, sc.buf)
|
||||
}
|
||||
|
||||
// Not thread safe.
|
||||
func (sc *SharedConn) Read(p []byte) (n int, err error) {
|
||||
if sc.buf == nil {
|
||||
return sc.Conn.Read(p)
|
||||
}
|
||||
n, err = sc.buf.Read(p)
|
||||
if err == io.EOF {
|
||||
sc.buf = nil
|
||||
var n2 int
|
||||
n2, err = sc.Conn.Read(p[n:])
|
||||
n += n2
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (sc *SharedConn) ResetBuf(buffer []byte) (err error) {
|
||||
sc.buf.Reset()
|
||||
_, err = sc.buf.Write(buffer)
|
||||
return err
|
||||
}
|
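A usage sketch for `SharedConn`: peek at the first byte of a connection without consuming it, then hand the connection on. This assumes the vendored `github.com/fatedier/golib/net` package shown above; `handleTLS`/`handleOther` and the listen address are placeholders.

```go
package main

import (
	"io"
	"net"

	gnet "github.com/fatedier/golib/net"
)

func sniff(conn net.Conn) {
	// Bytes read from rd are kept in sc's buffer and replayed by later Reads on sc.
	sc, rd := gnet.NewSharedConn(conn)

	head := make([]byte, 1)
	if _, err := io.ReadFull(rd, head); err != nil {
		conn.Close()
		return
	}

	if head[0] == 0x16 { // first byte of a TLS handshake record
		handleTLS(sc)
	} else {
		handleOther(sc)
	}
}

func handleTLS(c net.Conn)   { c.Close() }
func handleOther(c net.Conn) { c.Close() }

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:9000")
	if err != nil {
		panic(err)
	}
	for {
		c, err := ln.Accept()
		if err != nil {
			return
		}
		go sniff(c)
	}
}
```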
232
vendor/github.com/fatedier/golib/net/mux/mux.go
generated
vendored
232
vendor/github.com/fatedier/golib/net/mux/mux.go
generated
vendored
@ -1,232 +0,0 @@
|
||||
// Copyright 2018 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package mux
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sort"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/fatedier/golib/errors"
|
||||
gnet "github.com/fatedier/golib/net"
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultTimeout is the default length of time to wait for bytes we need.
|
||||
DefaultTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
type Mux struct {
|
||||
ln net.Listener
|
||||
|
||||
defaultLn *listener
|
||||
|
||||
// sorted by priority
|
||||
lns []*listener
|
||||
maxNeedBytesNum uint32
|
||||
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewMux(ln net.Listener) (mux *Mux) {
|
||||
mux = &Mux{
|
||||
ln: ln,
|
||||
lns: make([]*listener, 0),
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// priority
|
||||
func (mux *Mux) Listen(priority int, needBytesNum uint32, fn MatchFunc) net.Listener {
|
||||
ln := &listener{
|
||||
c: make(chan net.Conn),
|
||||
mux: mux,
|
||||
priority: priority,
|
||||
needBytesNum: needBytesNum,
|
||||
matchFn: fn,
|
||||
}
|
||||
|
||||
mux.mu.Lock()
|
||||
defer mux.mu.Unlock()
|
||||
if needBytesNum > mux.maxNeedBytesNum {
|
||||
mux.maxNeedBytesNum = needBytesNum
|
||||
}
|
||||
|
||||
newlns := append(mux.copyLns(), ln)
|
||||
sort.Slice(newlns, func(i, j int) bool {
|
||||
if newlns[i].priority == newlns[j].priority {
|
||||
return newlns[i].needBytesNum < newlns[j].needBytesNum
|
||||
}
|
||||
return newlns[i].priority < newlns[j].priority
|
||||
})
|
||||
mux.lns = newlns
|
||||
return ln
|
||||
}
|
||||
|
||||
func (mux *Mux) ListenHttp(priority int) net.Listener {
|
||||
return mux.Listen(priority, HttpNeedBytesNum, HttpMatchFunc)
|
||||
}
|
||||
|
||||
func (mux *Mux) ListenHttps(priority int) net.Listener {
|
||||
return mux.Listen(priority, HttpsNeedBytesNum, HttpsMatchFunc)
|
||||
}
|
||||
|
||||
func (mux *Mux) DefaultListener() net.Listener {
|
||||
mux.mu.Lock()
|
||||
defer mux.mu.Unlock()
|
||||
if mux.defaultLn == nil {
|
||||
mux.defaultLn = &listener{
|
||||
c: make(chan net.Conn),
|
||||
mux: mux,
|
||||
}
|
||||
}
|
||||
return mux.defaultLn
|
||||
}
|
||||
|
||||
func (mux *Mux) release(ln *listener) bool {
|
||||
result := false
|
||||
mux.mu.Lock()
|
||||
defer mux.mu.Unlock()
|
||||
lns := mux.copyLns()
|
||||
|
||||
for i, l := range lns {
|
||||
if l == ln {
|
||||
lns = append(lns[:i], lns[i+1:]...)
|
||||
result = true
|
||||
break
|
||||
}
|
||||
}
|
||||
mux.lns = lns
|
||||
return result
|
||||
}
|
||||
|
||||
func (mux *Mux) copyLns() []*listener {
|
||||
lns := make([]*listener, 0, len(mux.lns))
|
||||
for _, l := range mux.lns {
|
||||
lns = append(lns, l)
|
||||
}
|
||||
return lns
|
||||
}
|
||||
|
||||
// Serve handles connections from ln and multiplexes them across registered listeners.
|
||||
func (mux *Mux) Serve() error {
|
||||
for {
|
||||
// Wait for the next connection.
|
||||
// If it returns a temporary error then simply retry.
|
||||
// If it returns any other error then exit immediately.
|
||||
conn, err := mux.ln.Accept()
|
||||
if err, ok := err.(interface {
|
||||
Temporary() bool
|
||||
}); ok && err.Temporary() {
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go mux.handleConn(conn)
|
||||
}
|
||||
}
|
||||
|
||||
func (mux *Mux) handleConn(conn net.Conn) {
|
||||
mux.mu.RLock()
|
||||
maxNeedBytesNum := mux.maxNeedBytesNum
|
||||
lns := mux.lns
|
||||
defaultLn := mux.defaultLn
|
||||
mux.mu.RUnlock()
|
||||
|
||||
sharedConn, rd := gnet.NewSharedConnSize(conn, int(maxNeedBytesNum))
|
||||
data := make([]byte, maxNeedBytesNum)
|
||||
|
||||
conn.SetReadDeadline(time.Now().Add(DefaultTimeout))
|
||||
_, err := io.ReadFull(rd, data)
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
conn.SetReadDeadline(time.Time{})
|
||||
|
||||
for _, ln := range lns {
|
||||
if match := ln.matchFn(data); match {
|
||||
err = errors.PanicToError(func() {
|
||||
ln.c <- sharedConn
|
||||
})
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// No match listeners
|
||||
if defaultLn != nil {
|
||||
err = errors.PanicToError(func() {
|
||||
defaultLn.c <- sharedConn
|
||||
})
|
||||
if err != nil {
|
||||
conn.Close()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// No listeners for this connection, close it.
|
||||
conn.Close()
|
||||
return
|
||||
}
|
||||
|
||||
type listener struct {
|
||||
mux *Mux
|
||||
|
||||
priority int
|
||||
needBytesNum uint32
|
||||
matchFn MatchFunc
|
||||
|
||||
c chan net.Conn
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
// Accept waits for and returns the next connection to the listener.
|
||||
func (ln *listener) Accept() (net.Conn, error) {
|
||||
conn, ok := <-ln.c
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("network connection closed")
|
||||
}
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// Close removes this listener from the parent mux and closes the channel.
|
||||
func (ln *listener) Close() error {
|
||||
if ok := ln.mux.release(ln); ok {
|
||||
// Closing the channel unblocks any pending Accept calls.
|
||||
close(ln.c)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ln *listener) Addr() net.Addr {
|
||||
if ln.mux == nil {
|
||||
return nil
|
||||
}
|
||||
ln.mux.mu.RLock()
|
||||
defer ln.mux.mu.RUnlock()
|
||||
if ln.mux.ln == nil {
|
||||
return nil
|
||||
}
|
||||
return ln.mux.ln.Addr()
|
||||
}
|
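A sketch of wiring the `Mux` above in front of a single TCP listener: HTTPS and HTTP traffic get dedicated listeners and everything else falls through to the default listener. The address and the trivial handler are placeholders.

```go
package main

import (
	"net"

	"github.com/fatedier/golib/net/mux"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		panic(err)
	}

	m := mux.NewMux(ln)
	httpsLn := m.ListenHttps(1)      // lower priority value is matched first
	httpLn := m.ListenHttp(2)
	defaultLn := m.DefaultListener() // connections that match nothing

	go serve(httpsLn)
	go serve(httpLn)
	go serve(defaultLn)

	// Serve blocks, sniffing each accepted connection and dispatching it
	// to the first listener whose MatchFunc accepts the initial bytes.
	_ = m.Serve()
}

func serve(ln net.Listener) {
	for {
		c, err := ln.Accept()
		if err != nil {
			return
		}
		c.Close() // real code would handle the connection here
	}
}
```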
55
vendor/github.com/fatedier/golib/net/mux/rule.go
generated
vendored
55
vendor/github.com/fatedier/golib/net/mux/rule.go
generated
vendored
@ -1,55 +0,0 @@
|
||||
package mux
|
||||
|
||||
type MatchFunc func(data []byte) (match bool)
|
||||
|
||||
var (
|
||||
HttpsNeedBytesNum uint32 = 1
|
||||
HttpNeedBytesNum uint32 = 3
|
||||
YamuxNeedBytesNum uint32 = 2
|
||||
)
|
||||
|
||||
var HttpsMatchFunc MatchFunc = func(data []byte) bool {
|
||||
if len(data) < int(HttpsNeedBytesNum) {
|
||||
return false
|
||||
}
|
||||
|
||||
if data[0] == 0x16 {
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// From https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods
|
||||
var httpHeadBytes = map[string]struct{}{
|
||||
"GET": struct{}{},
|
||||
"HEA": struct{}{},
|
||||
"POS": struct{}{},
|
||||
"PUT": struct{}{},
|
||||
"DEL": struct{}{},
|
||||
"CON": struct{}{},
|
||||
"OPT": struct{}{},
|
||||
"TRA": struct{}{},
|
||||
"PAT": struct{}{},
|
||||
}
|
||||
|
||||
var HttpMatchFunc MatchFunc = func(data []byte) bool {
|
||||
if len(data) < int(HttpNeedBytesNum) {
|
||||
return false
|
||||
}
|
||||
|
||||
_, ok := httpHeadBytes[string(data[:3])]
|
||||
return ok
|
||||
}
|
||||
|
||||
// From https://github.com/hashicorp/yamux/blob/master/spec.md
|
||||
var YamuxMatchFunc MatchFunc = func(data []byte) bool {
|
||||
if len(data) < int(YamuxNeedBytesNum) {
|
||||
return false
|
||||
}
|
||||
|
||||
if data[0] == 0 && data[1] >= 0x0 && data[1] <= 0x3 {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
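Because the match functions above only look at the first few bytes, adding a new protocol to the mux is small. A hypothetical SSH matcher (SSH connections start with the banner `SSH-`, RFC 4253), registered with `Listen` from the previous file; the port is a placeholder:

```go
package main

import (
	"bytes"
	"net"

	"github.com/fatedier/golib/net/mux"
)

// sshMatchFunc matches connections whose first bytes are the SSH banner prefix.
var sshMatchFunc mux.MatchFunc = func(data []byte) bool {
	return len(data) >= 4 && bytes.HasPrefix(data, []byte("SSH-"))
}

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:2222")
	if err != nil {
		panic(err)
	}

	m := mux.NewMux(ln)
	// priority 0: checked before the HTTP/HTTPS matchers; 4 bytes are enough to decide.
	sshLn := m.Listen(0, 4, sshMatchFunc)
	go func() {
		for {
			c, err := sshLn.Accept()
			if err != nil {
				return
			}
			c.Close() // hand off to an SSH handler in real code
		}
	}()

	_ = m.Serve()
}
```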
107
vendor/github.com/fatedier/golib/net/proxy.go
generated
vendored
107
vendor/github.com/fatedier/golib/net/proxy.go
generated
vendored
@ -1,107 +0,0 @@
|
||||
// Copyright 2018 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package net
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"golang.org/x/net/proxy"
|
||||
)
|
||||
|
||||
type ProxyAuth struct {
|
||||
Enable bool
|
||||
Username string
|
||||
Passwd string
|
||||
}
|
||||
|
||||
func DialTcpByProxy(proxyStr string, addr string) (c net.Conn, err error) {
|
||||
if proxyStr == "" {
|
||||
return net.Dial("tcp", addr)
|
||||
}
|
||||
|
||||
var proxyUrl *url.URL
|
||||
if proxyUrl, err = url.Parse(proxyStr); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
auth := &ProxyAuth{}
|
||||
if proxyUrl.User != nil {
|
||||
auth.Enable = true
|
||||
auth.Username = proxyUrl.User.Username()
|
||||
auth.Passwd, _ = proxyUrl.User.Password()
|
||||
}
|
||||
|
||||
switch proxyUrl.Scheme {
|
||||
case "http":
|
||||
return DialTcpByHttpProxy(proxyUrl.Host, addr, auth)
|
||||
case "socks5":
|
||||
return DialTcpBySocks5Proxy(proxyUrl.Host, addr, auth)
|
||||
default:
|
||||
err = fmt.Errorf("Proxy URL scheme must be http or socks5, not [%s]", proxyUrl.Scheme)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func DialTcpByHttpProxy(proxyHost string, dstAddr string, auth *ProxyAuth) (c net.Conn, err error) {
|
||||
if c, err = net.Dial("tcp", proxyHost); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("CONNECT", "http://"+dstAddr, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if auth.Enable {
|
||||
req.Header.Set("Proxy-Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(auth.Username+":"+auth.Passwd)))
|
||||
}
|
||||
req.Header.Set("User-Agent", "Mozilla/5.0")
|
||||
req.Write(c)
|
||||
|
||||
resp, err := http.ReadResponse(bufio.NewReader(c), req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
resp.Body.Close()
|
||||
if resp.StatusCode != 200 {
|
||||
err = fmt.Errorf("DialTcpByHttpProxy error, StatusCode [%d]", resp.StatusCode)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func DialTcpBySocks5Proxy(proxyHost string, dstAddr string, auth *ProxyAuth) (c net.Conn, err error) {
|
||||
var s5Auth *proxy.Auth
|
||||
if auth.Enable {
|
||||
s5Auth = &proxy.Auth{
|
||||
User: auth.Username,
|
||||
Password: auth.Passwd,
|
||||
}
|
||||
}
|
||||
|
||||
dialer, err := proxy.SOCKS5("tcp", proxyHost, s5Auth, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if c, err = dialer.Dial("tcp", dstAddr); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
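A usage sketch for the proxy helpers above: dial a target through an HTTP CONNECT proxy with basic auth. The proxy address, credentials and target are placeholders; an empty proxy string falls back to a direct dial.

```go
package main

import (
	"fmt"

	gnet "github.com/fatedier/golib/net"
)

func main() {
	// DialTcpByProxy parses the scheme and credentials from the proxy URL
	// and dispatches to the HTTP or SOCKS5 helper above.
	conn, err := gnet.DialTcpByProxy("http://user:pass@127.0.0.1:3128", "example.com:443")
	if err != nil {
		fmt.Println("dial through proxy failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("connected via", conn.RemoteAddr())
}
```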
65
vendor/github.com/fatedier/golib/pool/buf.go
generated
vendored
65
vendor/github.com/fatedier/golib/pool/buf.go
generated
vendored
@ -1,65 +0,0 @@
|
||||
// Copyright 2018 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pool
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
var (
|
||||
bufPool16k sync.Pool
|
||||
bufPool5k sync.Pool
|
||||
bufPool2k sync.Pool
|
||||
bufPool1k sync.Pool
|
||||
bufPool sync.Pool
|
||||
)
|
||||
|
||||
func GetBuf(size int) []byte {
|
||||
var x interface{}
|
||||
if size >= 16*1024 {
|
||||
x = bufPool16k.Get()
|
||||
} else if size >= 5*1024 {
|
||||
x = bufPool5k.Get()
|
||||
} else if size >= 2*1024 {
|
||||
x = bufPool2k.Get()
|
||||
} else if size >= 1*1024 {
|
||||
x = bufPool1k.Get()
|
||||
} else {
|
||||
x = bufPool.Get()
|
||||
}
|
||||
if x == nil {
|
||||
return make([]byte, size)
|
||||
}
|
||||
buf := x.([]byte)
|
||||
if cap(buf) < size {
|
||||
return make([]byte, size)
|
||||
}
|
||||
return buf[:size]
|
||||
}
|
||||
|
||||
func PutBuf(buf []byte) {
|
||||
size := cap(buf)
|
||||
if size >= 16*1024 {
|
||||
bufPool16k.Put(buf)
|
||||
} else if size >= 5*1024 {
|
||||
bufPool5k.Put(buf)
|
||||
} else if size >= 2*1024 {
|
||||
bufPool2k.Put(buf)
|
||||
} else if size >= 1*1024 {
|
||||
bufPool1k.Put(buf)
|
||||
} else {
|
||||
bufPool.Put(buf)
|
||||
}
|
||||
}
|
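A usage sketch for the size-classed buffer pool above: borrow a buffer for a read and return it when done so the allocation can be reused.

```go
package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/fatedier/golib/pool"
)

func main() {
	src := strings.NewReader("hello pooled buffers")

	// GetBuf picks a sync.Pool class by size; PutBuf gives the slice back.
	buf := pool.GetBuf(4 * 1024)
	defer pool.PutBuf(buf)

	n, err := src.Read(buf)
	if err != nil && err != io.EOF {
		panic(err)
	}
	fmt.Println(string(buf[:n]))
}
```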
57
vendor/github.com/fatedier/golib/pool/snappy.go
generated
vendored
57
vendor/github.com/fatedier/golib/pool/snappy.go
generated
vendored
@ -1,57 +0,0 @@
|
||||
// Copyright 2017 fatedier, fatedier@gmail.com
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package pool
|
||||
|
||||
import (
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/snappy"
|
||||
)
|
||||
|
||||
var (
|
||||
snappyReaderPool sync.Pool
|
||||
snappyWriterPool sync.Pool
|
||||
)
|
||||
|
||||
func GetSnappyReader(r io.Reader) *snappy.Reader {
|
||||
var x interface{}
|
||||
x = snappyReaderPool.Get()
|
||||
if x == nil {
|
||||
return snappy.NewReader(r)
|
||||
}
|
||||
sr := x.(*snappy.Reader)
|
||||
sr.Reset(r)
|
||||
return sr
|
||||
}
|
||||
|
||||
func PutSnappyReader(sr *snappy.Reader) {
|
||||
snappyReaderPool.Put(sr)
|
||||
}
|
||||
|
||||
func GetSnappyWriter(w io.Writer) *snappy.Writer {
|
||||
var x interface{}
|
||||
x = snappyWriterPool.Get()
|
||||
if x == nil {
|
||||
return snappy.NewWriter(w)
|
||||
}
|
||||
sw := x.(*snappy.Writer)
|
||||
sw.Reset(w)
|
||||
return sw
|
||||
}
|
||||
|
||||
func PutSnappyWriter(sw *snappy.Writer) {
|
||||
snappyWriterPool.Put(sw)
|
||||
}
|
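A usage sketch for the pooled snappy reader/writer above: compress a payload into a buffer, read it back, and return both objects to their pools. The payload is a placeholder.

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/fatedier/golib/pool"
)

func main() {
	var compressed bytes.Buffer

	// Borrow a writer, compress, flush, and return it to the pool.
	sw := pool.GetSnappyWriter(&compressed)
	sw.Write([]byte("some frp control message"))
	sw.Flush()
	pool.PutSnappyWriter(sw)

	// Borrow a reader over the compressed bytes and decompress.
	sr := pool.GetSnappyReader(&compressed)
	plain, err := ioutil.ReadAll(sr)
	pool.PutSnappyReader(sr)
	if err != nil {
		panic(err)
	}

	fmt.Println(string(plain))
}
```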
24
vendor/github.com/fatedier/kcp-go/.gitignore
generated
vendored
24
vendor/github.com/fatedier/kcp-go/.gitignore
generated
vendored
@ -1,24 +0,0 @@
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
17
vendor/github.com/fatedier/kcp-go/.travis.yml
generated
vendored
17
vendor/github.com/fatedier/kcp-go/.travis.yml
generated
vendored
@ -1,17 +0,0 @@
|
||||
language: go
|
||||
go:
|
||||
- 1.9.x
|
||||
- 1.10.x
|
||||
- 1.11.x
|
||||
|
||||
before_install:
|
||||
- go get -t -v ./...
|
||||
|
||||
install:
|
||||
- go get github.com/xtaci/kcp-go
|
||||
|
||||
script:
|
||||
- go test -coverprofile=coverage.txt -covermode=atomic -bench .
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash)
|
22
vendor/github.com/fatedier/kcp-go/LICENSE
generated
vendored
22
vendor/github.com/fatedier/kcp-go/LICENSE
generated
vendored
@ -1,22 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Daniel Fu
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
222
vendor/github.com/fatedier/kcp-go/README.md
generated
vendored
222
vendor/github.com/fatedier/kcp-go/README.md
generated
vendored
@ -1,222 +0,0 @@
|
||||
<img src="kcp-go.png" alt="kcp-go" height="50px" />
|
||||
|
||||
|
||||
[![GoDoc][1]][2] [![Powered][9]][10] [![MIT licensed][11]][12] [![Build Status][3]][4] [![Go Report Card][5]][6] [![Coverage Status][7]][8]
|
||||
|
||||
[1]: https://godoc.org/github.com/xtaci/kcp-go?status.svg
|
||||
[2]: https://godoc.org/github.com/xtaci/kcp-go
|
||||
[3]: https://travis-ci.org/xtaci/kcp-go.svg?branch=master
|
||||
[4]: https://travis-ci.org/xtaci/kcp-go
|
||||
[5]: https://goreportcard.com/badge/github.com/xtaci/kcp-go
|
||||
[6]: https://goreportcard.com/report/github.com/xtaci/kcp-go
|
||||
[7]: https://codecov.io/gh/xtaci/kcp-go/branch/master/graph/badge.svg
|
||||
[8]: https://codecov.io/gh/xtaci/kcp-go
|
||||
[9]: https://img.shields.io/badge/KCP-Powered-blue.svg
|
||||
[10]: https://github.com/skywind3000/kcp
|
||||
[11]: https://img.shields.io/badge/license-MIT-blue.svg
|
||||
[12]: LICENSE
|
||||
|
||||
## Introduction
|
||||
|
||||
**kcp-go** is a **Production-Grade Reliable-UDP** library for [golang](https://golang.org/).
|
||||
|
||||
This library intends to provide **smooth, resilient, ordered, error-checked and anonymous** delivery of streams over **UDP** packets. It has been battle-tested with the open-source project [kcptun](https://github.com/xtaci/kcptun). Millions of devices (from low-end MIPS routers to high-end servers) have deployed **kcp-go**-powered programs in a variety of forms such as **online games, live broadcasting, file synchronization and network acceleration**.
|
||||
|
||||
[Latest Release](https://github.com/xtaci/kcp-go/releases)
|
||||
|
||||
## Features
|
||||
|
||||
1. Designed for **Latency-sensitive** scenarios.
|
||||
1. **Cache friendly** and **Memory optimized** design, offering an extremely **High Performance** core.
|
||||
1. Handles **>5K concurrent connections** on a single commodity server.
|
||||
1. Compatible with [net.Conn](https://golang.org/pkg/net/#Conn) and [net.Listener](https://golang.org/pkg/net/#Listener), a drop-in replacement for [net.TCPConn](https://golang.org/pkg/net/#TCPConn).
|
||||
1. [FEC(Forward Error Correction)](https://en.wikipedia.org/wiki/Forward_error_correction) Support with [Reed-Solomon Codes](https://en.wikipedia.org/wiki/Reed%E2%80%93Solomon_error_correction)
|
||||
1. Packet level encryption support with [AES](https://en.wikipedia.org/wiki/Advanced_Encryption_Standard), [TEA](https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm), [3DES](https://en.wikipedia.org/wiki/Triple_DES), [Blowfish](https://en.wikipedia.org/wiki/Blowfish_(cipher)), [Cast5](https://en.wikipedia.org/wiki/CAST-128), [Salsa20](https://en.wikipedia.org/wiki/Salsa20), etc. in [CFB](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Feedback_.28CFB.29) mode, which generates completely anonymous packets.
|
||||
1. Only **a fixed number of goroutines** is created for the entire server application; the cost of **context switches** between goroutines has been taken into consideration.
|
||||
1. Compatible with [skywind3000's](https://github.com/skywind3000) C version with various improvements.
|
||||
1. Platform-dependent optimizations: [sendmmsg](http://man7.org/linux/man-pages/man2/sendmmsg.2.html) and [recvmmsg](http://man7.org/linux/man-pages/man2/recvmmsg.2.html) were exploited for Linux.
|
||||
|
||||
## Documentation
|
||||
|
||||
For complete documentation, see the associated [Godoc](https://godoc.org/github.com/xtaci/kcp-go).
|
||||
|
||||
## Specification
|
||||
|
||||
<img src="frame.png" alt="Frame Format" height="109px" />
|
||||
|
||||
```
|
||||
NONCE:
|
||||
16bytes cryptographically secure random number, nonce changes for every packet.
|
||||
|
||||
CRC32:
|
||||
CRC-32 checksum of data using the IEEE polynomial
|
||||
|
||||
FEC TYPE:
|
||||
typeData = 0xF1
|
||||
typeParity = 0xF2
|
||||
|
||||
FEC SEQID:
|
||||
monotonically increasing in range: [0, (0xffffffff/shardSize) * shardSize - 1]
|
||||
|
||||
SIZE:
|
||||
The size of KCP frame plus 2
|
||||
```
|
||||
|
||||
```
|
||||
+-----------------+
|
||||
| SESSION |
|
||||
+-----------------+
|
||||
| KCP(ARQ) |
|
||||
+-----------------+
|
||||
| FEC(OPTIONAL) |
|
||||
+-----------------+
|
||||
| CRYPTO(OPTIONAL)|
|
||||
+-----------------+
|
||||
| UDP(PACKET) |
|
||||
+-----------------+
|
||||
| IP |
|
||||
+-----------------+
|
||||
| LINK |
|
||||
+-----------------+
|
||||
| PHY |
|
||||
+-----------------+
|
||||
(LAYER MODEL OF KCP-GO)
|
||||
```
|
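The CRC32 field in the frame above is a plain IEEE-polynomial checksum of the data, which is exactly what `hash/crc32` in the Go standard library computes. A minimal illustration (the payload is made up):

```go
package main

import (
	"fmt"
	"hash/crc32"
)

func main() {
	payload := []byte("kcp segment bytes")
	// ChecksumIEEE uses the IEEE polynomial named in the frame spec.
	sum := crc32.ChecksumIEEE(payload)
	fmt.Printf("crc32(IEEE) = 0x%08x\n", sum)
}
```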
||||
|
||||
|
||||
## Examples
|
||||
|
||||
1. [simple examples](https://github.com/xtaci/kcp-go/tree/master/examples)
|
||||
2. [kcptun client](https://github.com/xtaci/kcptun/blob/master/client/main.go)
|
||||
3. [kcptun server](https://github.com/xtaci/kcptun/blob/master/server/main.go)
|
||||
|
||||
## Benchmark
|
||||
```
|
||||
Model Name: MacBook Pro
|
||||
Model Identifier: MacBookPro14,1
|
||||
Processor Name: Intel Core i5
|
||||
Processor Speed: 3.1 GHz
|
||||
Number of Processors: 1
|
||||
Total Number of Cores: 2
|
||||
L2 Cache (per Core): 256 KB
|
||||
L3 Cache: 4 MB
|
||||
Memory: 8 GB
|
||||
```
|
||||
```
|
||||
$ go test -v -run=^$ -bench .
|
||||
beginning tests, encryption:salsa20, fec:10/3
|
||||
goos: darwin
|
||||
goarch: amd64
|
||||
pkg: github.com/xtaci/kcp-go
|
||||
BenchmarkSM4-4 50000 32180 ns/op 93.23 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkAES128-4 500000 3285 ns/op 913.21 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkAES192-4 300000 3623 ns/op 827.85 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkAES256-4 300000 3874 ns/op 774.20 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkTEA-4 100000 15384 ns/op 195.00 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkXOR-4 20000000 89.9 ns/op 33372.00 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkBlowfish-4 50000 26927 ns/op 111.41 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkNone-4 30000000 45.7 ns/op 65597.94 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkCast5-4 50000 34258 ns/op 87.57 MB/s 0 B/op 0 allocs/op
|
||||
Benchmark3DES-4 10000 117149 ns/op 25.61 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkTwofish-4 50000 33538 ns/op 89.45 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkXTEA-4 30000 45666 ns/op 65.69 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkSalsa20-4 500000 3308 ns/op 906.76 MB/s 0 B/op 0 allocs/op
|
||||
BenchmarkCRC32-4 20000000 65.2 ns/op 15712.43 MB/s
|
||||
BenchmarkCsprngSystem-4 1000000 1150 ns/op 13.91 MB/s
|
||||
BenchmarkCsprngMD5-4 10000000 145 ns/op 110.26 MB/s
|
||||
BenchmarkCsprngSHA1-4 10000000 158 ns/op 126.54 MB/s
|
||||
BenchmarkCsprngNonceMD5-4 10000000 153 ns/op 104.22 MB/s
|
||||
BenchmarkCsprngNonceAES128-4 100000000 19.1 ns/op 837.81 MB/s
|
||||
BenchmarkFECDecode-4 1000000 1119 ns/op 1339.61 MB/s 1606 B/op 2 allocs/op
|
||||
BenchmarkFECEncode-4 2000000 832 ns/op 1801.83 MB/s 17 B/op 0 allocs/op
|
||||
BenchmarkFlush-4 5000000 272 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkEchoSpeed4K-4 5000 259617 ns/op 15.78 MB/s 5451 B/op 149 allocs/op
|
||||
BenchmarkEchoSpeed64K-4 1000 1706084 ns/op 38.41 MB/s 56002 B/op 1604 allocs/op
|
||||
BenchmarkEchoSpeed512K-4 100 14345505 ns/op 36.55 MB/s 482597 B/op 13045 allocs/op
|
||||
BenchmarkEchoSpeed1M-4 30 34859104 ns/op 30.08 MB/s 1143773 B/op 27186 allocs/op
|
||||
BenchmarkSinkSpeed4K-4 50000 31369 ns/op 130.57 MB/s 1566 B/op 30 allocs/op
|
||||
BenchmarkSinkSpeed64K-4 5000 329065 ns/op 199.16 MB/s 21529 B/op 453 allocs/op
|
||||
BenchmarkSinkSpeed256K-4 500 2373354 ns/op 220.91 MB/s 166332 B/op 3554 allocs/op
|
||||
BenchmarkSinkSpeed1M-4 300 5117927 ns/op 204.88 MB/s 310378 B/op 6988 allocs/op
|
||||
PASS
|
||||
ok github.com/xtaci/kcp-go 50.349s
|
||||
```
|
||||
|
||||
|
||||
## Typical Flame Graph
|
||||
![Flame Graph in kcptun](flame.png)
|
||||
|
||||
## Key Design Considerations
|
||||
|
||||
1. slice vs. container/list
|
||||
|
||||
`kcp.flush()` loops through the send queue to check for retransmission every 20ms (the update interval).
|
||||
|
||||
I've written a benchmark comparing a sequential loop over a *slice* and over *container/list* here:
|
||||
|
||||
https://github.com/xtaci/notes/blob/master/golang/benchmark2/cachemiss_test.go
|
||||
|
||||
```
|
||||
BenchmarkLoopSlice-4 2000000000 0.39 ns/op
|
||||
BenchmarkLoopList-4 100000000 54.6 ns/op
|
||||
```
|
||||
|
||||
The list structure introduces **heavy cache misses** compared to a slice, which has better **locality**: with 5000 connections, a window size of 32 and a 20ms interval, each `kcp.flush()` costs 6us/0.03% (CPU) with a slice versus 8.7ms/43.5% (CPU) with a list (a minimal benchmark sketch follows this list).
|
||||
|
||||
2. Timing accuracy vs. syscall clock_gettime
|
||||
|
||||
Timing is **critical** to the **RTT estimator**: inaccurate timing leads to false retransmissions in KCP, but calling `time.Now()` costs 42 cycles (10.5ns on a 4GHz CPU, 15.6ns on my 2.7GHz MacBook Pro).
|
||||
|
||||
The benchmark for `time.Now()` is here:
|
||||
|
||||
https://github.com/xtaci/notes/blob/master/golang/benchmark2/syscall_test.go
|
||||
|
||||
```
|
||||
BenchmarkNow-4 100000000 15.6 ns/op
|
||||
```
|
||||
|
||||
In kcp-go, the current clock time is updated after each `kcp.output()` call returns, and a single `kcp.flush()` operation queries the system time once. Most of the time, 5000 connections cost 5000 * 15.6ns = 78us (a fixed cost while no packet needs to be sent); for a 10MB/s transfer with a 1400-byte MTU, `kcp.output()` is called around 7500 times per second and spends about 117us **every second** in `time.Now()`.
|
||||
|
||||
3. Memory management
|
||||
|
||||
Primary memory allocations are done from a global buffer pool, xmit.Buf: in kcp-go, when some bytes are needed, they are taken from that pool and a fixed-capacity 1500-byte slice (mtuLimit) is returned. The rx queue, tx queue and FEC queue all receive their bytes from there and return them to the pool after use, to prevent unnecessary zeroing of bytes. The pool mechanism maintains a high watermark for slice objects; in-flight objects from the pool survive periodic garbage collection, while the pool keeps the ability to return memory to the runtime when idle.
|
||||
|
||||
4. Information security
|
||||
|
||||
kcp-go ships with built-in packet encryption powered by various block ciphers and works in [Cipher Feedback Mode](https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation#Cipher_Feedback_(CFB)): for each packet to be sent, encryption starts by encrypting a [nonce](https://en.wikipedia.org/wiki/Cryptographic_nonce) drawn from the [system entropy](https://en.wikipedia.org/wiki//dev/random), so encrypting the same plaintext never yields the same ciphertext.
|
||||
|
||||
The contents of the packets are completely anonymous with encryption enabled, including the headers (FEC, KCP), checksums and payload. Note that, no matter which encryption method you choose in your upper layer, if you disable kcp-go's encryption the transmission is insecure to some degree: the header is ***PLAINTEXT*** to everyone and therefore susceptible to tampering, such as jamming the *sliding window size*, *round-trip time*, *FEC properties* and *checksums*. ```AES-128``` is suggested as the minimal encryption, since modern CPUs ship with [AES-NI](https://en.wikipedia.org/wiki/AES_instruction_set) instructions and it performs even better than `salsa20` (check the table above).
|
||||
|
||||
Other possible attacks on kcp-go include: a) [traffic analysis](https://en.wikipedia.org/wiki/Traffic_analysis): dataflow to specific websites may show patterns while interchanging data, but this kind of eavesdropping has been mitigated by adapting [smux](https://github.com/xtaci/smux) to mix data streams and introduce noise; a perfect solution has not appeared yet, though theoretically shuffling/mixing messages on a larger-scale network may mitigate the problem. b) [replay attack](https://en.wikipedia.org/wiki/Replay_attack): since asymmetric encryption has not been introduced into kcp-go, capturing packets and replaying them on a different machine is possible (hijacking the session and decrypting the contents is still *impossible*), so upper layers should add an asymmetric scheme to guarantee the authenticity of each message (to process each message exactly once), such as HTTPS/OpenSSL/LibreSSL; only signing the requests with private keys eliminates this type of attack.
|
||||
|
||||
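A compact reproduction of the slice vs. `container/list` comparison from item 1 above (absolute numbers will differ from the figures quoted); save it as a `_test.go` file and run `go test -bench .`:

```go
package cachemiss

import (
	"container/list"
	"testing"
)

const n = 1 << 16

var (
	s  = make([]int, n)
	ll = list.New()
)

func init() {
	for i := 0; i < n; i++ {
		ll.PushBack(i)
	}
}

// BenchmarkLoopSlice walks a contiguous slice: predictable, cache-friendly access.
func BenchmarkLoopSlice(b *testing.B) {
	var sum int
	for i := 0; i < b.N; i++ {
		for _, v := range s {
			sum += v
		}
	}
	_ = sum
}

// BenchmarkLoopList chases pointers through a linked list: poor locality.
func BenchmarkLoopList(b *testing.B) {
	var sum int
	for i := 0; i < b.N; i++ {
		for e := ll.Front(); e != nil; e = e.Next() {
			sum += e.Value.(int)
		}
	}
	_ = sum
}
```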
## Connection Termination
|
||||
|
||||
Control messages like **SYN/FIN/RST** in TCP **are not defined** in KCP; you need some **keepalive/heartbeat mechanism** at the application level. A real-world example is to run a **multiplexing** protocol over the session, such as [smux](https://github.com/xtaci/smux) (with an embedded keepalive mechanism); see [kcptun](https://github.com/xtaci/kcptun) for an example.
|
||||
|
||||
## FAQ
|
||||
|
||||
Q: I'm handling >5K connections on my server and the CPU utilization is very high.
|
||||
|
||||
A: Running kcp-go on a standalone `agent` or `gate` server is suggested, not only to reduce CPU utilization, but also to protect the **precision** of RTT measurements (timing), which indirectly affects retransmission. Increasing the update `interval` via `SetNoDelay`, e.g. `conn.SetNoDelay(1, 40, 1, 1)`, will dramatically reduce system load but lower performance.
|
||||
|
||||
Q: When should I enable FEC?
|
||||
|
||||
A: Forward error correction is critical to long-distance transmission, because a packet loss leads to a huge time penalty. In today's complicated packet-routing networks, round-trip-time-based loss detection is not always efficient: the large deviation of RTT samples over a long path usually leads to a larger RTO value in a typical RTT estimator, which in turn slows down the transmission.
|
||||
|
||||
Q: Should I enable encryption?
|
||||
|
||||
A: Yes, for the safety of the protocol, even if the upper layer already encrypts its data.
|
||||
|
||||
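To put the FAQ's `SetNoDelay` advice in context, a dialing sketch assuming the usual kcp-go constructors (`DialWithOptions`, `NewAESBlockCrypt`); the address, passphrase and salt are placeholders, and the FEC settings mirror the `fec:10/3` used in the benchmark above.

```go
package main

import (
	"crypto/sha1"

	kcp "github.com/fatedier/kcp-go"
	"golang.org/x/crypto/pbkdf2"
)

func main() {
	// Derive a 32-byte AES key from a passphrase.
	key := pbkdf2.Key([]byte("demo pass"), []byte("demo salt"), 1024, 32, sha1.New)
	block, err := kcp.NewAESBlockCrypt(key)
	if err != nil {
		panic(err)
	}

	// 10 data shards + 3 parity shards of FEC.
	sess, err := kcp.DialWithOptions("127.0.0.1:12345", block, 10, 3)
	if err != nil {
		panic(err)
	}
	defer sess.Close()

	// nodelay on, 40ms internal update interval, fast resend, no congestion control.
	sess.SetNoDelay(1, 40, 1, 1)

	sess.Write([]byte("ping"))
}
```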
## Who is using this?
|
||||
|
||||
1. https://github.com/xtaci/kcptun -- A Secure Tunnel Based On KCP over UDP.
|
||||
2. https://github.com/getlantern/lantern -- Lantern delivers fast access to the open Internet.
|
||||
3. https://github.com/smallnest/rpcx -- A RPC service framework based on net/rpc like alibaba Dubbo and weibo Motan.
|
||||
4. https://github.com/gonet2/agent -- A gateway for games with stream multiplexing.
|
||||
5. https://github.com/syncthing/syncthing -- Open Source Continuous File Synchronization.
|
||||
|
||||
## Links
|
||||
|
||||
1. https://github.com/xtaci/libkcp -- FEC enhanced KCP session library for iOS/Android in C++
|
||||
2. https://github.com/skywind3000/kcp -- A Fast and Reliable ARQ Protocol
|
||||
3. https://github.com/klauspost/reedsolomon -- Reed-Solomon Erasure Coding in Go
|
12
vendor/github.com/fatedier/kcp-go/batchconn.go
generated
vendored
12
vendor/github.com/fatedier/kcp-go/batchconn.go
generated
vendored
@ -1,12 +0,0 @@
|
||||
package kcp
|
||||
|
||||
import "golang.org/x/net/ipv4"
|
||||
|
||||
const (
|
||||
batchSize = 16
|
||||
)
|
||||
|
||||
type batchConn interface {
|
||||
WriteBatch(ms []ipv4.Message, flags int) (int, error)
|
||||
ReadBatch(ms []ipv4.Message, flags int) (int, error)
|
||||
}
|
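The `batchConn` interface above is satisfied by `ipv4.PacketConn` from `golang.org/x/net`, which is presumably how the `sendmmsg`/`recvmmsg` batching mentioned in the README is reached on Linux (other platforms fall back to one message per call). A minimal sketch that sends itself a datagram and reads it back as a batch:

```go
package main

import (
	"fmt"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer c.Close()

	pc := ipv4.NewPacketConn(c) // provides ReadBatch/WriteBatch

	// Send one datagram to ourselves so ReadBatch has something to return.
	if _, err := c.WriteTo([]byte("hi"), c.LocalAddr()); err != nil {
		panic(err)
	}

	msgs := make([]ipv4.Message, 16)
	for i := range msgs {
		msgs[i].Buffers = [][]byte{make([]byte, 1500)}
	}

	n, err := pc.ReadBatch(msgs, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println("read", n, "message(s), first payload:", string(msgs[0].Buffers[0][:msgs[0].N]))
}
```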
785
vendor/github.com/fatedier/kcp-go/crypt.go
generated
vendored
785
vendor/github.com/fatedier/kcp-go/crypt.go
generated
vendored
@ -1,785 +0,0 @@
|
||||
package kcp
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/des"
|
||||
"crypto/sha1"
|
||||
|
||||
"github.com/templexxx/xor"
|
||||
"github.com/tjfoc/gmsm/sm4"
|
||||
|
||||
"golang.org/x/crypto/blowfish"
|
||||
"golang.org/x/crypto/cast5"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
"golang.org/x/crypto/salsa20"
|
||||
"golang.org/x/crypto/tea"
|
||||
"golang.org/x/crypto/twofish"
|
||||
"golang.org/x/crypto/xtea"
|
||||
)
|
||||
|
||||
var (
|
||||
initialVector = []byte{167, 115, 79, 156, 18, 172, 27, 1, 164, 21, 242, 193, 252, 120, 230, 107}
|
||||
saltxor = `sH3CIVoF#rWLtJo6`
|
||||
)
|
||||
|
||||
// BlockCrypt defines encryption/decryption methods for a given byte slice.
|
||||
// Notes on implementing: the data to be encrypted contains a builtin
|
||||
// nonce at the first 16 bytes
|
||||
type BlockCrypt interface {
|
||||
// Encrypt encrypts the whole block in src into dst.
|
||||
// Dst and src may point at the same memory.
|
||||
Encrypt(dst, src []byte)
|
||||
|
||||
// Decrypt decrypts the whole block in src into dst.
|
||||
// Dst and src may point at the same memory.
|
||||
Decrypt(dst, src []byte)
|
||||
}
|
||||
|
||||
type salsa20BlockCrypt struct {
|
||||
key [32]byte
|
||||
}
|
||||
|
||||
// NewSalsa20BlockCrypt https://en.wikipedia.org/wiki/Salsa20
|
||||
func NewSalsa20BlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(salsa20BlockCrypt)
|
||||
copy(c.key[:], key)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *salsa20BlockCrypt) Encrypt(dst, src []byte) {
|
||||
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
|
||||
copy(dst[:8], src[:8])
|
||||
}
|
||||
func (c *salsa20BlockCrypt) Decrypt(dst, src []byte) {
|
||||
salsa20.XORKeyStream(dst[8:], src[8:], src[:8], &c.key)
|
||||
copy(dst[:8], src[:8])
|
||||
}
|
||||
|
||||
type sm4BlockCrypt struct {
|
||||
encbuf [sm4.BlockSize]byte
|
||||
decbuf [2 * sm4.BlockSize]byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewSM4BlockCrypt https://github.com/tjfoc/gmsm/tree/master/sm4
|
||||
func NewSM4BlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(sm4BlockCrypt)
|
||||
block, err := sm4.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *sm4BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
|
||||
func (c *sm4BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
|
||||
|
||||
type twofishBlockCrypt struct {
|
||||
encbuf [twofish.BlockSize]byte
|
||||
decbuf [2 * twofish.BlockSize]byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewTwofishBlockCrypt https://en.wikipedia.org/wiki/Twofish
|
||||
func NewTwofishBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(twofishBlockCrypt)
|
||||
block, err := twofish.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *twofishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
|
||||
func (c *twofishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
|
||||
|
||||
type tripleDESBlockCrypt struct {
|
||||
encbuf [des.BlockSize]byte
|
||||
decbuf [2 * des.BlockSize]byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewTripleDESBlockCrypt https://en.wikipedia.org/wiki/Triple_DES
|
||||
func NewTripleDESBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(tripleDESBlockCrypt)
|
||||
block, err := des.NewTripleDESCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *tripleDESBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
|
||||
func (c *tripleDESBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
|
||||
|
||||
type cast5BlockCrypt struct {
|
||||
encbuf [cast5.BlockSize]byte
|
||||
decbuf [2 * cast5.BlockSize]byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewCast5BlockCrypt https://en.wikipedia.org/wiki/CAST-128
|
||||
func NewCast5BlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(cast5BlockCrypt)
|
||||
block, err := cast5.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *cast5BlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
|
||||
func (c *cast5BlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
|
||||
|
||||
type blowfishBlockCrypt struct {
|
||||
encbuf [blowfish.BlockSize]byte
|
||||
decbuf [2 * blowfish.BlockSize]byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewBlowfishBlockCrypt https://en.wikipedia.org/wiki/Blowfish_(cipher)
|
||||
func NewBlowfishBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(blowfishBlockCrypt)
|
||||
block, err := blowfish.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *blowfishBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
|
||||
func (c *blowfishBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
|
||||
|
||||
type aesBlockCrypt struct {
|
||||
encbuf [aes.BlockSize]byte
|
||||
decbuf [2 * aes.BlockSize]byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewAESBlockCrypt https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
|
||||
func NewAESBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(aesBlockCrypt)
|
||||
block, err := aes.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *aesBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
|
||||
func (c *aesBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
|
||||
|
||||
type teaBlockCrypt struct {
|
||||
encbuf [tea.BlockSize]byte
|
||||
decbuf [2 * tea.BlockSize]byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewTEABlockCrypt https://en.wikipedia.org/wiki/Tiny_Encryption_Algorithm
|
||||
func NewTEABlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(teaBlockCrypt)
|
||||
block, err := tea.NewCipherWithRounds(key, 16)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *teaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
|
||||
func (c *teaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
|
||||
|
||||
type xteaBlockCrypt struct {
|
||||
encbuf [xtea.BlockSize]byte
|
||||
decbuf [2 * xtea.BlockSize]byte
|
||||
block cipher.Block
|
||||
}
|
||||
|
||||
// NewXTEABlockCrypt https://en.wikipedia.org/wiki/XTEA
|
||||
func NewXTEABlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(xteaBlockCrypt)
|
||||
block, err := xtea.NewCipher(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.block = block
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *xteaBlockCrypt) Encrypt(dst, src []byte) { encrypt(c.block, dst, src, c.encbuf[:]) }
|
||||
func (c *xteaBlockCrypt) Decrypt(dst, src []byte) { decrypt(c.block, dst, src, c.decbuf[:]) }
|
||||
|
||||
type simpleXORBlockCrypt struct {
|
||||
xortbl []byte
|
||||
}
|
||||
|
||||
// NewSimpleXORBlockCrypt simple xor with key expanding
|
||||
func NewSimpleXORBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
c := new(simpleXORBlockCrypt)
|
||||
c.xortbl = pbkdf2.Key(key, []byte(saltxor), 32, mtuLimit, sha1.New)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *simpleXORBlockCrypt) Encrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }
|
||||
func (c *simpleXORBlockCrypt) Decrypt(dst, src []byte) { xor.Bytes(dst, src, c.xortbl) }
|
||||
|
||||
type noneBlockCrypt struct{}
|
||||
|
||||
// NewNoneBlockCrypt does nothing but copying
|
||||
func NewNoneBlockCrypt(key []byte) (BlockCrypt, error) {
|
||||
return new(noneBlockCrypt), nil
|
||||
}
|
||||
|
||||
func (c *noneBlockCrypt) Encrypt(dst, src []byte) { copy(dst, src) }
|
||||
func (c *noneBlockCrypt) Decrypt(dst, src []byte) { copy(dst, src) }
|
||||
|
||||
// packet encryption with local CFB mode
|
||||
func encrypt(block cipher.Block, dst, src, buf []byte) {
|
||||
switch block.BlockSize() {
|
||||
case 8:
|
||||
encrypt8(block, dst, src, buf)
|
||||
case 16:
|
||||
encrypt16(block, dst, src, buf)
|
||||
default:
|
||||
encryptVariant(block, dst, src, buf)
|
||||
}
|
||||
}
|
||||
|
||||
// optimized encryption for ciphers that work on 8-byte blocks
|
||||
func encrypt8(block cipher.Block, dst, src, buf []byte) {
|
||||
tbl := buf[:8]
|
||||
block.Encrypt(tbl, initialVector)
|
||||
n := len(src) / 8
|
||||
base := 0
|
||||
repeat := n / 8
|
||||
left := n % 8
|
||||
for i := 0; i < repeat; i++ {
|
||||
s := src[base:][0:64]
|
||||
d := dst[base:][0:64]
|
||||
// 1
|
||||
xor.BytesSrc1(d[0:8], s[0:8], tbl)
|
||||
block.Encrypt(tbl, d[0:8])
|
||||
// 2
|
||||
xor.BytesSrc1(d[8:16], s[8:16], tbl)
|
||||
block.Encrypt(tbl, d[8:16])
|
||||
// 3
|
||||
xor.BytesSrc1(d[16:24], s[16:24], tbl)
|
||||
block.Encrypt(tbl, d[16:24])
|
||||
// 4
|
||||
xor.BytesSrc1(d[24:32], s[24:32], tbl)
|
||||
block.Encrypt(tbl, d[24:32])
|
||||
// 5
|
||||
xor.BytesSrc1(d[32:40], s[32:40], tbl)
|
||||
block.Encrypt(tbl, d[32:40])
|
||||
// 6
|
||||
xor.BytesSrc1(d[40:48], s[40:48], tbl)
|
||||
block.Encrypt(tbl, d[40:48])
|
||||
// 7
|
||||
xor.BytesSrc1(d[48:56], s[48:56], tbl)
|
||||
block.Encrypt(tbl, d[48:56])
|
||||
// 8
|
||||
xor.BytesSrc1(d[56:64], s[56:64], tbl)
|
||||
block.Encrypt(tbl, d[56:64])
|
||||
base += 64
|
||||
}
|
||||
|
||||
switch left {
|
||||
case 7:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 8
|
||||
fallthrough
|
||||
case 6:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 8
|
||||
fallthrough
|
||||
case 5:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 8
|
||||
fallthrough
|
||||
case 4:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 8
|
||||
fallthrough
|
||||
case 3:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 8
|
||||
fallthrough
|
||||
case 2:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 8
|
||||
fallthrough
|
||||
case 1:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 8
|
||||
fallthrough
|
||||
case 0:
|
||||
xor.BytesSrc0(dst[base:], src[base:], tbl)
|
||||
}
|
||||
}
|
||||
|
||||
// optimized encryption for ciphers that work on 16-byte blocks
|
||||
func encrypt16(block cipher.Block, dst, src, buf []byte) {
|
||||
tbl := buf[:16]
|
||||
block.Encrypt(tbl, initialVector)
|
||||
n := len(src) / 16
|
||||
base := 0
|
||||
repeat := n / 8
|
||||
left := n % 8
|
||||
for i := 0; i < repeat; i++ {
|
||||
s := src[base:][0:128]
|
||||
d := dst[base:][0:128]
|
||||
// 1
|
||||
xor.BytesSrc1(d[0:16], s[0:16], tbl)
|
||||
block.Encrypt(tbl, d[0:16])
|
||||
// 2
|
||||
xor.BytesSrc1(d[16:32], s[16:32], tbl)
|
||||
block.Encrypt(tbl, d[16:32])
|
||||
// 3
|
||||
xor.BytesSrc1(d[32:48], s[32:48], tbl)
|
||||
block.Encrypt(tbl, d[32:48])
|
||||
// 4
|
||||
xor.BytesSrc1(d[48:64], s[48:64], tbl)
|
||||
block.Encrypt(tbl, d[48:64])
|
||||
// 5
|
||||
xor.BytesSrc1(d[64:80], s[64:80], tbl)
|
||||
block.Encrypt(tbl, d[64:80])
|
||||
// 6
|
||||
xor.BytesSrc1(d[80:96], s[80:96], tbl)
|
||||
block.Encrypt(tbl, d[80:96])
|
||||
// 7
|
||||
xor.BytesSrc1(d[96:112], s[96:112], tbl)
|
||||
block.Encrypt(tbl, d[96:112])
|
||||
// 8
|
||||
xor.BytesSrc1(d[112:128], s[112:128], tbl)
|
||||
block.Encrypt(tbl, d[112:128])
|
||||
base += 128
|
||||
}
|
||||
|
||||
switch left {
|
||||
case 7:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 16
|
||||
fallthrough
|
||||
case 6:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 16
|
||||
fallthrough
|
||||
case 5:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 16
|
||||
fallthrough
|
||||
case 4:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 16
|
||||
fallthrough
|
||||
case 3:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 16
|
||||
fallthrough
|
||||
case 2:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 16
|
||||
fallthrough
|
||||
case 1:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += 16
|
||||
fallthrough
|
||||
case 0:
|
||||
xor.BytesSrc0(dst[base:], src[base:], tbl)
|
||||
}
|
||||
}
|
||||
|
||||
func encryptVariant(block cipher.Block, dst, src, buf []byte) {
|
||||
blocksize := block.BlockSize()
|
||||
tbl := buf[:blocksize]
|
||||
block.Encrypt(tbl, initialVector)
|
||||
n := len(src) / blocksize
|
||||
base := 0
|
||||
repeat := n / 8
|
||||
left := n % 8
|
||||
for i := 0; i < repeat; i++ {
|
||||
// 1
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
|
||||
// 2
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
|
||||
// 3
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
|
||||
// 4
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
|
||||
// 5
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
|
||||
// 6
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
|
||||
// 7
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
|
||||
// 8
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
}
|
||||
|
||||
switch left {
|
||||
case 7:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
fallthrough
|
||||
case 6:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
fallthrough
|
||||
case 5:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
fallthrough
|
||||
case 4:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
fallthrough
|
||||
case 3:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
fallthrough
|
||||
case 2:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
fallthrough
|
||||
case 1:
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
block.Encrypt(tbl, dst[base:])
|
||||
base += blocksize
|
||||
fallthrough
|
||||
case 0:
|
||||
xor.BytesSrc0(dst[base:], src[base:], tbl)
|
||||
}
|
||||
}
|
||||
|
||||
// decryption
|
||||
func decrypt(block cipher.Block, dst, src, buf []byte) {
|
||||
switch block.BlockSize() {
|
||||
case 8:
|
||||
decrypt8(block, dst, src, buf)
|
||||
case 16:
|
||||
decrypt16(block, dst, src, buf)
|
||||
default:
|
||||
decryptVariant(block, dst, src, buf)
|
||||
}
|
||||
}
|
||||
|
||||
func decrypt8(block cipher.Block, dst, src, buf []byte) {
|
||||
tbl := buf[0:8]
|
||||
next := buf[8:16]
|
||||
block.Encrypt(tbl, initialVector)
|
||||
n := len(src) / 8
|
||||
base := 0
|
||||
repeat := n / 8
|
||||
left := n % 8
|
||||
for i := 0; i < repeat; i++ {
|
||||
s := src[base:][0:64]
|
||||
d := dst[base:][0:64]
|
||||
// 1
|
||||
block.Encrypt(next, s[0:8])
|
||||
xor.BytesSrc1(d[0:8], s[0:8], tbl)
|
||||
// 2
|
||||
block.Encrypt(tbl, s[8:16])
|
||||
xor.BytesSrc1(d[8:16], s[8:16], next)
|
||||
// 3
|
||||
block.Encrypt(next, s[16:24])
|
||||
xor.BytesSrc1(d[16:24], s[16:24], tbl)
|
||||
// 4
|
||||
block.Encrypt(tbl, s[24:32])
|
||||
xor.BytesSrc1(d[24:32], s[24:32], next)
|
||||
// 5
|
||||
block.Encrypt(next, s[32:40])
|
||||
xor.BytesSrc1(d[32:40], s[32:40], tbl)
|
||||
// 6
|
||||
block.Encrypt(tbl, s[40:48])
|
||||
xor.BytesSrc1(d[40:48], s[40:48], next)
|
||||
// 7
|
||||
block.Encrypt(next, s[48:56])
|
||||
xor.BytesSrc1(d[48:56], s[48:56], tbl)
|
||||
// 8
|
||||
block.Encrypt(tbl, s[56:64])
|
||||
xor.BytesSrc1(d[56:64], s[56:64], next)
|
||||
base += 64
|
||||
}
|
||||
|
||||
switch left {
|
||||
case 7:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 8
|
||||
fallthrough
|
||||
case 6:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 8
|
||||
fallthrough
|
||||
case 5:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 8
|
||||
fallthrough
|
||||
case 4:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 8
|
||||
fallthrough
|
||||
case 3:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 8
|
||||
fallthrough
|
||||
case 2:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 8
|
||||
fallthrough
|
||||
case 1:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 8
|
||||
fallthrough
|
||||
case 0:
|
||||
xor.BytesSrc0(dst[base:], src[base:], tbl)
|
||||
}
|
||||
}
|
||||
|
||||
func decrypt16(block cipher.Block, dst, src, buf []byte) {
|
||||
tbl := buf[0:16]
|
||||
next := buf[16:32]
|
||||
block.Encrypt(tbl, initialVector)
|
||||
n := len(src) / 16
|
||||
base := 0
|
||||
repeat := n / 8
|
||||
left := n % 8
|
||||
for i := 0; i < repeat; i++ {
|
||||
s := src[base:][0:128]
|
||||
d := dst[base:][0:128]
|
||||
// 1
|
||||
block.Encrypt(next, s[0:16])
|
||||
xor.BytesSrc1(d[0:16], s[0:16], tbl)
|
||||
// 2
|
||||
block.Encrypt(tbl, s[16:32])
|
||||
xor.BytesSrc1(d[16:32], s[16:32], next)
|
||||
// 3
|
||||
block.Encrypt(next, s[32:48])
|
||||
xor.BytesSrc1(d[32:48], s[32:48], tbl)
|
||||
// 4
|
||||
block.Encrypt(tbl, s[48:64])
|
||||
xor.BytesSrc1(d[48:64], s[48:64], next)
|
||||
// 5
|
||||
block.Encrypt(next, s[64:80])
|
||||
xor.BytesSrc1(d[64:80], s[64:80], tbl)
|
||||
// 6
|
||||
block.Encrypt(tbl, s[80:96])
|
||||
xor.BytesSrc1(d[80:96], s[80:96], next)
|
||||
// 7
|
||||
block.Encrypt(next, s[96:112])
|
||||
xor.BytesSrc1(d[96:112], s[96:112], tbl)
|
||||
// 8
|
||||
block.Encrypt(tbl, s[112:128])
|
||||
xor.BytesSrc1(d[112:128], s[112:128], next)
|
||||
base += 128
|
||||
}
|
||||
|
||||
switch left {
|
||||
case 7:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 16
|
||||
fallthrough
|
||||
case 6:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 16
|
||||
fallthrough
|
||||
case 5:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 16
|
||||
fallthrough
|
||||
case 4:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 16
|
||||
fallthrough
|
||||
case 3:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 16
|
||||
fallthrough
|
||||
case 2:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 16
|
||||
fallthrough
|
||||
case 1:
|
||||
block.Encrypt(next, src[base:])
|
||||
xor.BytesSrc1(dst[base:], src[base:], tbl)
|
||||
tbl, next = next, tbl
|
||||
base += 16
|
||||
fallthrough
|
||||
case 0:
|
||||
xor.BytesSrc0(dst[base:], src[base:], tbl)
|
||||
}
|
||||
}
|
||||
|
||||
func decryptVariant(block cipher.Block, dst, src, buf []byte) {
	blocksize := block.BlockSize()
	tbl := buf[:blocksize]
	next := buf[blocksize:]
	block.Encrypt(tbl, initialVector)
	n := len(src) / blocksize
	base := 0
	repeat := n / 8
	left := n % 8
	for i := 0; i < repeat; i++ {
		// 1
		block.Encrypt(next, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		base += blocksize

		// 2
		block.Encrypt(tbl, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], next)
		base += blocksize

		// 3
		block.Encrypt(next, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		base += blocksize

		// 4
		block.Encrypt(tbl, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], next)
		base += blocksize

		// 5
		block.Encrypt(next, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		base += blocksize

		// 6
		block.Encrypt(tbl, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], next)
		base += blocksize

		// 7
		block.Encrypt(next, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		base += blocksize

		// 8
		block.Encrypt(tbl, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], next)
		base += blocksize
	}

	switch left {
	case 7:
		block.Encrypt(next, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		tbl, next = next, tbl
		base += blocksize
		fallthrough
	case 6:
		block.Encrypt(next, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		tbl, next = next, tbl
		base += blocksize
		fallthrough
	case 5:
		block.Encrypt(next, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		tbl, next = next, tbl
		base += blocksize
		fallthrough
	case 4:
		block.Encrypt(next, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		tbl, next = next, tbl
		base += blocksize
		fallthrough
	case 3:
		block.Encrypt(next, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		tbl, next = next, tbl
		base += blocksize
		fallthrough
	case 2:
		block.Encrypt(next, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		tbl, next = next, tbl
		base += blocksize
		fallthrough
	case 1:
		block.Encrypt(next, src[base:])
		xor.BytesSrc1(dst[base:], src[base:], tbl)
		tbl, next = next, tbl
		base += blocksize
		fallthrough
	case 0:
		xor.BytesSrc0(dst[base:], src[base:], tbl)
	}
}
BIN vendor/github.com/fatedier/kcp-go/donate.png generated vendored
Binary file not shown. (Before: 4.3 KiB)
52 vendor/github.com/fatedier/kcp-go/entropy.go generated vendored
@@ -1,52 +0,0 @@
package kcp

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/md5"
	"crypto/rand"
	"io"
)

// Entropy defines a entropy source
type Entropy interface {
	Init()
	Fill(nonce []byte)
}

// nonceMD5 nonce generator for packet header
type nonceMD5 struct {
	seed [md5.Size]byte
}

func (n *nonceMD5) Init() { /*nothing required*/ }

func (n *nonceMD5) Fill(nonce []byte) {
	if n.seed[0] == 0 { // entropy update
		io.ReadFull(rand.Reader, n.seed[:])
	}
	n.seed = md5.Sum(n.seed[:])
	copy(nonce, n.seed[:])
}

// nonceAES128 nonce generator for packet headers
type nonceAES128 struct {
	seed  [aes.BlockSize]byte
	block cipher.Block
}

func (n *nonceAES128) Init() {
	var key [16]byte //aes-128
	io.ReadFull(rand.Reader, key[:])
	io.ReadFull(rand.Reader, n.seed[:])
	block, _ := aes.NewCipher(key[:])
	n.block = block
}

func (n *nonceAES128) Fill(nonce []byte) {
	if n.seed[0] == 0 { // entropy update
		io.ReadFull(rand.Reader, n.seed[:])
	}
	n.block.Encrypt(n.seed[:], n.seed[:])
	copy(nonce, n.seed[:])
}
308 vendor/github.com/fatedier/kcp-go/fec.go generated vendored
@@ -1,308 +0,0 @@
|
||||
package kcp
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/klauspost/reedsolomon"
|
||||
)
|
||||
|
||||
const (
|
||||
fecHeaderSize = 6
|
||||
fecHeaderSizePlus2 = fecHeaderSize + 2 // plus 2B data size
|
||||
typeData = 0xf1
|
||||
typeParity = 0xf2
|
||||
)
|
||||
|
||||
// fecPacket is a decoded FEC packet
|
||||
type fecPacket []byte
|
||||
|
||||
func (bts fecPacket) seqid() uint32 { return binary.LittleEndian.Uint32(bts) }
|
||||
func (bts fecPacket) flag() uint16 { return binary.LittleEndian.Uint16(bts[4:]) }
|
||||
func (bts fecPacket) data() []byte { return bts[6:] }
|
||||
|
||||
// fecDecoder for decoding incoming packets
|
||||
type fecDecoder struct {
|
||||
rxlimit int // queue size limit
|
||||
dataShards int
|
||||
parityShards int
|
||||
shardSize int
|
||||
rx []fecPacket // ordered receive queue
|
||||
|
||||
// caches
|
||||
decodeCache [][]byte
|
||||
flagCache []bool
|
||||
|
||||
// zeros
|
||||
zeros []byte
|
||||
|
||||
// RS decoder
|
||||
codec reedsolomon.Encoder
|
||||
}
|
||||
|
||||
func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder {
|
||||
if dataShards <= 0 || parityShards <= 0 {
|
||||
return nil
|
||||
}
|
||||
if rxlimit < dataShards+parityShards {
|
||||
return nil
|
||||
}
|
||||
|
||||
dec := new(fecDecoder)
|
||||
dec.rxlimit = rxlimit
|
||||
dec.dataShards = dataShards
|
||||
dec.parityShards = parityShards
|
||||
dec.shardSize = dataShards + parityShards
|
||||
codec, err := reedsolomon.New(dataShards, parityShards)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
dec.codec = codec
|
||||
dec.decodeCache = make([][]byte, dec.shardSize)
|
||||
dec.flagCache = make([]bool, dec.shardSize)
|
||||
dec.zeros = make([]byte, mtuLimit)
|
||||
return dec
|
||||
}
|
||||
|
||||
// decode a fec packet
|
||||
func (dec *fecDecoder) decode(in fecPacket) (recovered [][]byte) {
|
||||
// insertion
|
||||
n := len(dec.rx) - 1
|
||||
insertIdx := 0
|
||||
for i := n; i >= 0; i-- {
|
||||
if in.seqid() == dec.rx[i].seqid() { // de-duplicate
|
||||
return nil
|
||||
} else if _itimediff(in.seqid(), dec.rx[i].seqid()) > 0 { // insertion
|
||||
insertIdx = i + 1
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// make a copy
|
||||
pkt := fecPacket(xmitBuf.Get().([]byte)[:len(in)])
|
||||
copy(pkt, in)
|
||||
|
||||
// insert into ordered rx queue
|
||||
if insertIdx == n+1 {
|
||||
dec.rx = append(dec.rx, pkt)
|
||||
} else {
|
||||
dec.rx = append(dec.rx, fecPacket{})
|
||||
copy(dec.rx[insertIdx+1:], dec.rx[insertIdx:]) // shift right
|
||||
dec.rx[insertIdx] = pkt
|
||||
}
|
||||
|
||||
// shard range for current packet
|
||||
shardBegin := pkt.seqid() - pkt.seqid()%uint32(dec.shardSize)
|
||||
shardEnd := shardBegin + uint32(dec.shardSize) - 1
|
||||
|
||||
// max search range in ordered queue for current shard
|
||||
searchBegin := insertIdx - int(pkt.seqid()%uint32(dec.shardSize))
|
||||
if searchBegin < 0 {
|
||||
searchBegin = 0
|
||||
}
|
||||
searchEnd := searchBegin + dec.shardSize - 1
|
||||
if searchEnd >= len(dec.rx) {
|
||||
searchEnd = len(dec.rx) - 1
|
||||
}
|
||||
|
||||
// re-construct datashards
|
||||
if searchEnd-searchBegin+1 >= dec.dataShards {
|
||||
var numshard, numDataShard, first, maxlen int
|
||||
|
||||
// zero caches
|
||||
shards := dec.decodeCache
|
||||
shardsflag := dec.flagCache
|
||||
for k := range dec.decodeCache {
|
||||
shards[k] = nil
|
||||
shardsflag[k] = false
|
||||
}
|
||||
|
||||
// shard assembly
|
||||
for i := searchBegin; i <= searchEnd; i++ {
|
||||
seqid := dec.rx[i].seqid()
|
||||
if _itimediff(seqid, shardEnd) > 0 {
|
||||
break
|
||||
} else if _itimediff(seqid, shardBegin) >= 0 {
|
||||
shards[seqid%uint32(dec.shardSize)] = dec.rx[i].data()
|
||||
shardsflag[seqid%uint32(dec.shardSize)] = true
|
||||
numshard++
|
||||
if dec.rx[i].flag() == typeData {
|
||||
numDataShard++
|
||||
}
|
||||
if numshard == 1 {
|
||||
first = i
|
||||
}
|
||||
if len(dec.rx[i].data()) > maxlen {
|
||||
maxlen = len(dec.rx[i].data())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if numDataShard == dec.dataShards {
|
||||
// case 1: no loss on data shards
|
||||
dec.rx = dec.freeRange(first, numshard, dec.rx)
|
||||
} else if numshard >= dec.dataShards {
|
||||
// case 2: loss on data shards, but it's recoverable from parity shards
|
||||
for k := range shards {
|
||||
if shards[k] != nil {
|
||||
dlen := len(shards[k])
|
||||
shards[k] = shards[k][:maxlen]
|
||||
copy(shards[k][dlen:], dec.zeros)
|
||||
} else {
|
||||
shards[k] = xmitBuf.Get().([]byte)[:0]
|
||||
}
|
||||
}
|
||||
if err := dec.codec.ReconstructData(shards); err == nil {
|
||||
for k := range shards[:dec.dataShards] {
|
||||
if !shardsflag[k] {
|
||||
// recovered data should be recycled
|
||||
recovered = append(recovered, shards[k])
|
||||
}
|
||||
}
|
||||
}
|
||||
dec.rx = dec.freeRange(first, numshard, dec.rx)
|
||||
}
|
||||
}
|
||||
|
||||
// keep rxlimit
|
||||
if len(dec.rx) > dec.rxlimit {
|
||||
if dec.rx[0].flag() == typeData { // track the unrecoverable data
|
||||
atomic.AddUint64(&DefaultSnmp.FECShortShards, 1)
|
||||
}
|
||||
dec.rx = dec.freeRange(0, 1, dec.rx)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// free a range of fecPacket
|
||||
func (dec *fecDecoder) freeRange(first, n int, q []fecPacket) []fecPacket {
|
||||
for i := first; i < first+n; i++ { // recycle buffer
|
||||
xmitBuf.Put([]byte(q[i]))
|
||||
}
|
||||
|
||||
if first == 0 && n < cap(q)/2 {
|
||||
return q[n:]
|
||||
}
|
||||
copy(q[first:], q[first+n:])
|
||||
return q[:len(q)-n]
|
||||
}
|
||||
|
||||
type (
|
||||
// fecEncoder for encoding outgoing packets
|
||||
fecEncoder struct {
|
||||
dataShards int
|
||||
parityShards int
|
||||
shardSize int
|
||||
paws uint32 // Protect Against Wrapped Sequence numbers
|
||||
next uint32 // next seqid
|
||||
|
||||
shardCount int // count the number of datashards collected
|
||||
maxSize int // track maximum data length in datashard
|
||||
|
||||
headerOffset int // FEC header offset
|
||||
payloadOffset int // FEC payload offset
|
||||
|
||||
// caches
|
||||
shardCache [][]byte
|
||||
encodeCache [][]byte
|
||||
|
||||
// zeros
|
||||
zeros []byte
|
||||
|
||||
// RS encoder
|
||||
codec reedsolomon.Encoder
|
||||
}
|
||||
)
|
||||
|
||||
func newFECEncoder(dataShards, parityShards, offset int) *fecEncoder {
|
||||
if dataShards <= 0 || parityShards <= 0 {
|
||||
return nil
|
||||
}
|
||||
enc := new(fecEncoder)
|
||||
enc.dataShards = dataShards
|
||||
enc.parityShards = parityShards
|
||||
enc.shardSize = dataShards + parityShards
|
||||
enc.paws = 0xffffffff / uint32(enc.shardSize) * uint32(enc.shardSize)
|
||||
enc.headerOffset = offset
|
||||
enc.payloadOffset = enc.headerOffset + fecHeaderSize
|
||||
|
||||
codec, err := reedsolomon.New(dataShards, parityShards)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
enc.codec = codec
|
||||
|
||||
// caches
|
||||
enc.encodeCache = make([][]byte, enc.shardSize)
|
||||
enc.shardCache = make([][]byte, enc.shardSize)
|
||||
for k := range enc.shardCache {
|
||||
enc.shardCache[k] = make([]byte, mtuLimit)
|
||||
}
|
||||
enc.zeros = make([]byte, mtuLimit)
|
||||
return enc
|
||||
}
|
||||
|
||||
// encodes the packet, outputs parity shards if we have collected quorum datashards
|
||||
// notice: the contents of 'ps' will be re-written in successive calling
|
||||
func (enc *fecEncoder) encode(b []byte) (ps [][]byte) {
|
||||
// The header format:
|
||||
// | FEC SEQID(4B) | FEC TYPE(2B) | SIZE (2B) | PAYLOAD(SIZE-2) |
|
||||
// |<-headerOffset |<-payloadOffset
|
||||
enc.markData(b[enc.headerOffset:])
|
||||
binary.LittleEndian.PutUint16(b[enc.payloadOffset:], uint16(len(b[enc.payloadOffset:])))
|
||||
|
||||
// copy data from payloadOffset to fec shard cache
|
||||
sz := len(b)
|
||||
enc.shardCache[enc.shardCount] = enc.shardCache[enc.shardCount][:sz]
|
||||
copy(enc.shardCache[enc.shardCount][enc.payloadOffset:], b[enc.payloadOffset:])
|
||||
enc.shardCount++
|
||||
|
||||
// track max datashard length
|
||||
if sz > enc.maxSize {
|
||||
enc.maxSize = sz
|
||||
}
|
||||
|
||||
// Generation of Reed-Solomon Erasure Code
|
||||
if enc.shardCount == enc.dataShards {
|
||||
// fill '0' into the tail of each datashard
|
||||
for i := 0; i < enc.dataShards; i++ {
|
||||
shard := enc.shardCache[i]
|
||||
slen := len(shard)
|
||||
copy(shard[slen:enc.maxSize], enc.zeros)
|
||||
}
|
||||
|
||||
// construct equal-sized slice with stripped header
|
||||
cache := enc.encodeCache
|
||||
for k := range cache {
|
||||
cache[k] = enc.shardCache[k][enc.payloadOffset:enc.maxSize]
|
||||
}
|
||||
|
||||
// encoding
|
||||
if err := enc.codec.Encode(cache); err == nil {
|
||||
ps = enc.shardCache[enc.dataShards:]
|
||||
for k := range ps {
|
||||
enc.markParity(ps[k][enc.headerOffset:])
|
||||
ps[k] = ps[k][:enc.maxSize]
|
||||
}
|
||||
}
|
||||
|
||||
// counters resetting
|
||||
enc.shardCount = 0
|
||||
enc.maxSize = 0
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (enc *fecEncoder) markData(data []byte) {
|
||||
binary.LittleEndian.PutUint32(data, enc.next)
|
||||
binary.LittleEndian.PutUint16(data[4:], typeData)
|
||||
enc.next++
|
||||
}
|
||||
|
||||
func (enc *fecEncoder) markParity(data []byte) {
|
||||
binary.LittleEndian.PutUint32(data, enc.next)
|
||||
binary.LittleEndian.PutUint16(data[4:], typeParity)
|
||||
// sequence wrap will only happen at parity shard
|
||||
enc.next = (enc.next + 1) % enc.paws
|
||||
}
|
BIN vendor/github.com/fatedier/kcp-go/flame.png generated vendored
Binary file not shown. (Before: 56 KiB)
BIN vendor/github.com/fatedier/kcp-go/frame.png generated vendored
Binary file not shown. (Before: 35 KiB)
BIN vendor/github.com/fatedier/kcp-go/kcp-go.png generated vendored
Binary file not shown. (Before: 8.9 KiB)
1053 vendor/github.com/fatedier/kcp-go/kcp.go generated vendored
File diff suppressed because it is too large
48 vendor/github.com/fatedier/kcp-go/readloop.go generated vendored
@@ -1,48 +0,0 @@
|
||||
package kcp
|
||||
|
||||
import (
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func (s *UDPSession) defaultReadLoop() {
|
||||
buf := make([]byte, mtuLimit)
|
||||
var src string
|
||||
for {
|
||||
if n, addr, err := s.conn.ReadFrom(buf); err == nil {
|
||||
// make sure the packet is from the same source
|
||||
if src == "" { // set source address
|
||||
src = addr.String()
|
||||
} else if addr.String() != src {
|
||||
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
|
||||
continue
|
||||
}
|
||||
|
||||
if n >= s.headerSize+IKCP_OVERHEAD {
|
||||
s.packetInput(buf[:n])
|
||||
} else {
|
||||
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
|
||||
}
|
||||
} else {
|
||||
s.notifyReadError(errors.WithStack(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *Listener) defaultMonitor() {
|
||||
buf := make([]byte, mtuLimit)
|
||||
for {
|
||||
if n, from, err := l.conn.ReadFrom(buf); err == nil {
|
||||
if n >= l.headerSize+IKCP_OVERHEAD {
|
||||
l.packetInput(buf[:n], from)
|
||||
} else {
|
||||
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
|
||||
}
|
||||
} else {
|
||||
l.notifyReadError(errors.WithStack(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
11 vendor/github.com/fatedier/kcp-go/readloop_generic.go generated vendored
@@ -1,11 +0,0 @@
// +build !linux

package kcp

func (s *UDPSession) readLoop() {
	s.defaultReadLoop()
}

func (l *Listener) monitor() {
	l.defaultMonitor()
}
120 vendor/github.com/fatedier/kcp-go/readloop_linux.go generated vendored
@@ -1,120 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package kcp
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/ipv4"
|
||||
"golang.org/x/net/ipv6"
|
||||
)
|
||||
|
||||
// the read loop for a client session
|
||||
func (s *UDPSession) readLoop() {
|
||||
// default version
|
||||
if s.xconn == nil {
|
||||
s.defaultReadLoop()
|
||||
return
|
||||
}
|
||||
|
||||
// x/net version
|
||||
var src string
|
||||
msgs := make([]ipv4.Message, batchSize)
|
||||
for k := range msgs {
|
||||
msgs[k].Buffers = [][]byte{make([]byte, mtuLimit)}
|
||||
}
|
||||
|
||||
for {
|
||||
if count, err := s.xconn.ReadBatch(msgs, 0); err == nil {
|
||||
for i := 0; i < count; i++ {
|
||||
msg := &msgs[i]
|
||||
// make sure the packet is from the same source
|
||||
if src == "" { // set source address if nil
|
||||
src = msg.Addr.String()
|
||||
} else if msg.Addr.String() != src {
|
||||
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
|
||||
continue
|
||||
}
|
||||
|
||||
if msg.N < s.headerSize+IKCP_OVERHEAD {
|
||||
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
|
||||
continue
|
||||
}
|
||||
|
||||
// source and size has validated
|
||||
s.packetInput(msg.Buffers[0][:msg.N])
|
||||
}
|
||||
} else {
|
||||
// compatibility issue:
|
||||
// for linux kernel<=2.6.32, support for sendmmsg is not available
|
||||
// an error of type os.SyscallError will be returned
|
||||
if operr, ok := err.(*net.OpError); ok {
|
||||
if se, ok := operr.Err.(*os.SyscallError); ok {
|
||||
if se.Syscall == "recvmmsg" {
|
||||
s.defaultReadLoop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
s.notifyReadError(errors.WithStack(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// monitor incoming data for all connections of server
|
||||
func (l *Listener) monitor() {
|
||||
var xconn batchConn
|
||||
if _, ok := l.conn.(*net.UDPConn); ok {
|
||||
addr, err := net.ResolveUDPAddr("udp", l.conn.LocalAddr().String())
|
||||
if err == nil {
|
||||
if addr.IP.To4() != nil {
|
||||
xconn = ipv4.NewPacketConn(l.conn)
|
||||
} else {
|
||||
xconn = ipv6.NewPacketConn(l.conn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// default version
|
||||
if xconn == nil {
|
||||
l.defaultMonitor()
|
||||
return
|
||||
}
|
||||
|
||||
// x/net version
|
||||
msgs := make([]ipv4.Message, batchSize)
|
||||
for k := range msgs {
|
||||
msgs[k].Buffers = [][]byte{make([]byte, mtuLimit)}
|
||||
}
|
||||
|
||||
for {
|
||||
if count, err := xconn.ReadBatch(msgs, 0); err == nil {
|
||||
for i := 0; i < count; i++ {
|
||||
msg := &msgs[i]
|
||||
if msg.N >= l.headerSize+IKCP_OVERHEAD {
|
||||
l.packetInput(msg.Buffers[0][:msg.N], msg.Addr)
|
||||
} else {
|
||||
atomic.AddUint64(&DefaultSnmp.InErrs, 1)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// compatibility issue:
|
||||
// for linux kernel<=2.6.32, support for sendmmsg is not available
|
||||
// an error of type os.SyscallError will be returned
|
||||
if operr, ok := err.(*net.OpError); ok {
|
||||
if se, ok := operr.Err.(*os.SyscallError); ok {
|
||||
if se.Syscall == "recvmmsg" {
|
||||
l.defaultMonitor()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
l.notifyReadError(errors.WithStack(err))
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
1021 vendor/github.com/fatedier/kcp-go/sess.go generated vendored
File diff suppressed because it is too large
164 vendor/github.com/fatedier/kcp-go/snmp.go generated vendored
@@ -1,164 +0,0 @@
|
||||
package kcp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// Snmp defines network statistics indicator
|
||||
type Snmp struct {
|
||||
BytesSent uint64 // bytes sent from upper level
|
||||
BytesReceived uint64 // bytes received to upper level
|
||||
MaxConn uint64 // max number of connections ever reached
|
||||
ActiveOpens uint64 // accumulated active open connections
|
||||
PassiveOpens uint64 // accumulated passive open connections
|
||||
CurrEstab uint64 // current number of established connections
|
||||
InErrs uint64 // UDP read errors reported from net.PacketConn
|
||||
InCsumErrors uint64 // checksum errors from CRC32
|
||||
KCPInErrors uint64 // packet iput errors reported from KCP
|
||||
InPkts uint64 // incoming packets count
|
||||
OutPkts uint64 // outgoing packets count
|
||||
InSegs uint64 // incoming KCP segments
|
||||
OutSegs uint64 // outgoing KCP segments
|
||||
InBytes uint64 // UDP bytes received
|
||||
OutBytes uint64 // UDP bytes sent
|
||||
RetransSegs uint64 // accmulated retransmited segments
|
||||
FastRetransSegs uint64 // accmulated fast retransmitted segments
|
||||
EarlyRetransSegs uint64 // accmulated early retransmitted segments
|
||||
LostSegs uint64 // number of segs infered as lost
|
||||
RepeatSegs uint64 // number of segs duplicated
|
||||
FECRecovered uint64 // correct packets recovered from FEC
|
||||
FECErrs uint64 // incorrect packets recovered from FEC
|
||||
FECParityShards uint64 // FEC segments received
|
||||
FECShortShards uint64 // number of data shards that's not enough for recovery
|
||||
}
|
||||
|
||||
func newSnmp() *Snmp {
|
||||
return new(Snmp)
|
||||
}
|
||||
|
||||
// Header returns all field names
|
||||
func (s *Snmp) Header() []string {
|
||||
return []string{
|
||||
"BytesSent",
|
||||
"BytesReceived",
|
||||
"MaxConn",
|
||||
"ActiveOpens",
|
||||
"PassiveOpens",
|
||||
"CurrEstab",
|
||||
"InErrs",
|
||||
"InCsumErrors",
|
||||
"KCPInErrors",
|
||||
"InPkts",
|
||||
"OutPkts",
|
||||
"InSegs",
|
||||
"OutSegs",
|
||||
"InBytes",
|
||||
"OutBytes",
|
||||
"RetransSegs",
|
||||
"FastRetransSegs",
|
||||
"EarlyRetransSegs",
|
||||
"LostSegs",
|
||||
"RepeatSegs",
|
||||
"FECParityShards",
|
||||
"FECErrs",
|
||||
"FECRecovered",
|
||||
"FECShortShards",
|
||||
}
|
||||
}
|
||||
|
||||
// ToSlice returns current snmp info as slice
|
||||
func (s *Snmp) ToSlice() []string {
|
||||
snmp := s.Copy()
|
||||
return []string{
|
||||
fmt.Sprint(snmp.BytesSent),
|
||||
fmt.Sprint(snmp.BytesReceived),
|
||||
fmt.Sprint(snmp.MaxConn),
|
||||
fmt.Sprint(snmp.ActiveOpens),
|
||||
fmt.Sprint(snmp.PassiveOpens),
|
||||
fmt.Sprint(snmp.CurrEstab),
|
||||
fmt.Sprint(snmp.InErrs),
|
||||
fmt.Sprint(snmp.InCsumErrors),
|
||||
fmt.Sprint(snmp.KCPInErrors),
|
||||
fmt.Sprint(snmp.InPkts),
|
||||
fmt.Sprint(snmp.OutPkts),
|
||||
fmt.Sprint(snmp.InSegs),
|
||||
fmt.Sprint(snmp.OutSegs),
|
||||
fmt.Sprint(snmp.InBytes),
|
||||
fmt.Sprint(snmp.OutBytes),
|
||||
fmt.Sprint(snmp.RetransSegs),
|
||||
fmt.Sprint(snmp.FastRetransSegs),
|
||||
fmt.Sprint(snmp.EarlyRetransSegs),
|
||||
fmt.Sprint(snmp.LostSegs),
|
||||
fmt.Sprint(snmp.RepeatSegs),
|
||||
fmt.Sprint(snmp.FECParityShards),
|
||||
fmt.Sprint(snmp.FECErrs),
|
||||
fmt.Sprint(snmp.FECRecovered),
|
||||
fmt.Sprint(snmp.FECShortShards),
|
||||
}
|
||||
}
|
||||
|
||||
// Copy make a copy of current snmp snapshot
|
||||
func (s *Snmp) Copy() *Snmp {
|
||||
d := newSnmp()
|
||||
d.BytesSent = atomic.LoadUint64(&s.BytesSent)
|
||||
d.BytesReceived = atomic.LoadUint64(&s.BytesReceived)
|
||||
d.MaxConn = atomic.LoadUint64(&s.MaxConn)
|
||||
d.ActiveOpens = atomic.LoadUint64(&s.ActiveOpens)
|
||||
d.PassiveOpens = atomic.LoadUint64(&s.PassiveOpens)
|
||||
d.CurrEstab = atomic.LoadUint64(&s.CurrEstab)
|
||||
d.InErrs = atomic.LoadUint64(&s.InErrs)
|
||||
d.InCsumErrors = atomic.LoadUint64(&s.InCsumErrors)
|
||||
d.KCPInErrors = atomic.LoadUint64(&s.KCPInErrors)
|
||||
d.InPkts = atomic.LoadUint64(&s.InPkts)
|
||||
d.OutPkts = atomic.LoadUint64(&s.OutPkts)
|
||||
d.InSegs = atomic.LoadUint64(&s.InSegs)
|
||||
d.OutSegs = atomic.LoadUint64(&s.OutSegs)
|
||||
d.InBytes = atomic.LoadUint64(&s.InBytes)
|
||||
d.OutBytes = atomic.LoadUint64(&s.OutBytes)
|
||||
d.RetransSegs = atomic.LoadUint64(&s.RetransSegs)
|
||||
d.FastRetransSegs = atomic.LoadUint64(&s.FastRetransSegs)
|
||||
d.EarlyRetransSegs = atomic.LoadUint64(&s.EarlyRetransSegs)
|
||||
d.LostSegs = atomic.LoadUint64(&s.LostSegs)
|
||||
d.RepeatSegs = atomic.LoadUint64(&s.RepeatSegs)
|
||||
d.FECParityShards = atomic.LoadUint64(&s.FECParityShards)
|
||||
d.FECErrs = atomic.LoadUint64(&s.FECErrs)
|
||||
d.FECRecovered = atomic.LoadUint64(&s.FECRecovered)
|
||||
d.FECShortShards = atomic.LoadUint64(&s.FECShortShards)
|
||||
return d
|
||||
}
|
||||
|
||||
// Reset values to zero
|
||||
func (s *Snmp) Reset() {
|
||||
atomic.StoreUint64(&s.BytesSent, 0)
|
||||
atomic.StoreUint64(&s.BytesReceived, 0)
|
||||
atomic.StoreUint64(&s.MaxConn, 0)
|
||||
atomic.StoreUint64(&s.ActiveOpens, 0)
|
||||
atomic.StoreUint64(&s.PassiveOpens, 0)
|
||||
atomic.StoreUint64(&s.CurrEstab, 0)
|
||||
atomic.StoreUint64(&s.InErrs, 0)
|
||||
atomic.StoreUint64(&s.InCsumErrors, 0)
|
||||
atomic.StoreUint64(&s.KCPInErrors, 0)
|
||||
atomic.StoreUint64(&s.InPkts, 0)
|
||||
atomic.StoreUint64(&s.OutPkts, 0)
|
||||
atomic.StoreUint64(&s.InSegs, 0)
|
||||
atomic.StoreUint64(&s.OutSegs, 0)
|
||||
atomic.StoreUint64(&s.InBytes, 0)
|
||||
atomic.StoreUint64(&s.OutBytes, 0)
|
||||
atomic.StoreUint64(&s.RetransSegs, 0)
|
||||
atomic.StoreUint64(&s.FastRetransSegs, 0)
|
||||
atomic.StoreUint64(&s.EarlyRetransSegs, 0)
|
||||
atomic.StoreUint64(&s.LostSegs, 0)
|
||||
atomic.StoreUint64(&s.RepeatSegs, 0)
|
||||
atomic.StoreUint64(&s.FECParityShards, 0)
|
||||
atomic.StoreUint64(&s.FECErrs, 0)
|
||||
atomic.StoreUint64(&s.FECRecovered, 0)
|
||||
atomic.StoreUint64(&s.FECShortShards, 0)
|
||||
}
|
||||
|
||||
// DefaultSnmp is the global KCP connection statistics collector
|
||||
var DefaultSnmp *Snmp
|
||||
|
||||
func init() {
|
||||
DefaultSnmp = newSnmp()
|
||||
}
|
25 vendor/github.com/fatedier/kcp-go/tx.go generated vendored
@@ -1,25 +0,0 @@
package kcp

import (
	"sync/atomic"

	"github.com/pkg/errors"
	"golang.org/x/net/ipv4"
)

func (s *UDPSession) defaultTx(txqueue []ipv4.Message) {
	nbytes := 0
	npkts := 0
	for k := range txqueue {
		if n, err := s.conn.WriteTo(txqueue[k].Buffers[0], txqueue[k].Addr); err == nil {
			nbytes += n
			npkts++
			xmitBuf.Put(txqueue[k].Buffers[0])
		} else {
			s.notifyWriteError(errors.WithStack(err))
			break
		}
	}
	atomic.AddUint64(&DefaultSnmp.OutPkts, uint64(npkts))
	atomic.AddUint64(&DefaultSnmp.OutBytes, uint64(nbytes))
}
11 vendor/github.com/fatedier/kcp-go/tx_generic.go generated vendored
@@ -1,11 +0,0 @@
// +build !linux

package kcp

import (
	"golang.org/x/net/ipv4"
)

func (s *UDPSession) tx(txqueue []ipv4.Message) {
	s.defaultTx(txqueue)
}
52 vendor/github.com/fatedier/kcp-go/tx_linux.go generated vendored
@@ -1,52 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package kcp
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/ipv4"
|
||||
)
|
||||
|
||||
func (s *UDPSession) tx(txqueue []ipv4.Message) {
|
||||
// default version
|
||||
if s.xconn == nil || s.xconnWriteError != nil {
|
||||
s.defaultTx(txqueue)
|
||||
return
|
||||
}
|
||||
|
||||
// x/net version
|
||||
nbytes := 0
|
||||
npkts := 0
|
||||
for len(txqueue) > 0 {
|
||||
if n, err := s.xconn.WriteBatch(txqueue, 0); err == nil {
|
||||
for k := range txqueue[:n] {
|
||||
nbytes += len(txqueue[k].Buffers[0])
|
||||
xmitBuf.Put(txqueue[k].Buffers[0])
|
||||
}
|
||||
npkts += n
|
||||
txqueue = txqueue[n:]
|
||||
} else {
|
||||
// compatibility issue:
|
||||
// for linux kernel<=2.6.32, support for sendmmsg is not available
|
||||
// an error of type os.SyscallError will be returned
|
||||
if operr, ok := err.(*net.OpError); ok {
|
||||
if se, ok := operr.Err.(*os.SyscallError); ok {
|
||||
if se.Syscall == "sendmmsg" {
|
||||
s.xconnWriteError = se
|
||||
s.defaultTx(txqueue)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
s.notifyWriteError(errors.WithStack(err))
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
atomic.AddUint64(&DefaultSnmp.OutPkts, uint64(npkts))
|
||||
atomic.AddUint64(&DefaultSnmp.OutBytes, uint64(nbytes))
|
||||
}
|
104 vendor/github.com/fatedier/kcp-go/updater.go generated vendored
@@ -1,104 +0,0 @@
|
||||
package kcp
|
||||
|
||||
import (
|
||||
"container/heap"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var updater updateHeap
|
||||
|
||||
func init() {
|
||||
updater.init()
|
||||
go updater.updateTask()
|
||||
}
|
||||
|
||||
// entry contains a session update info
|
||||
type entry struct {
|
||||
ts time.Time
|
||||
s *UDPSession
|
||||
}
|
||||
|
||||
// a global heap managed kcp.flush() caller
|
||||
type updateHeap struct {
|
||||
entries []entry
|
||||
mu sync.Mutex
|
||||
chWakeUp chan struct{}
|
||||
}
|
||||
|
||||
func (h *updateHeap) Len() int { return len(h.entries) }
|
||||
func (h *updateHeap) Less(i, j int) bool { return h.entries[i].ts.Before(h.entries[j].ts) }
|
||||
func (h *updateHeap) Swap(i, j int) {
|
||||
h.entries[i], h.entries[j] = h.entries[j], h.entries[i]
|
||||
h.entries[i].s.updaterIdx = i
|
||||
h.entries[j].s.updaterIdx = j
|
||||
}
|
||||
|
||||
func (h *updateHeap) Push(x interface{}) {
|
||||
h.entries = append(h.entries, x.(entry))
|
||||
n := len(h.entries)
|
||||
h.entries[n-1].s.updaterIdx = n - 1
|
||||
}
|
||||
|
||||
func (h *updateHeap) Pop() interface{} {
|
||||
n := len(h.entries)
|
||||
x := h.entries[n-1]
|
||||
h.entries[n-1].s.updaterIdx = -1
|
||||
h.entries[n-1] = entry{} // manual set nil for GC
|
||||
h.entries = h.entries[0 : n-1]
|
||||
return x
|
||||
}
|
||||
|
||||
func (h *updateHeap) init() {
|
||||
h.chWakeUp = make(chan struct{}, 1)
|
||||
}
|
||||
|
||||
func (h *updateHeap) addSession(s *UDPSession) {
|
||||
h.mu.Lock()
|
||||
heap.Push(h, entry{time.Now(), s})
|
||||
h.mu.Unlock()
|
||||
h.wakeup()
|
||||
}
|
||||
|
||||
func (h *updateHeap) removeSession(s *UDPSession) {
|
||||
h.mu.Lock()
|
||||
if s.updaterIdx != -1 {
|
||||
heap.Remove(h, s.updaterIdx)
|
||||
}
|
||||
h.mu.Unlock()
|
||||
}
|
||||
|
||||
func (h *updateHeap) wakeup() {
|
||||
select {
|
||||
case h.chWakeUp <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (h *updateHeap) updateTask() {
|
||||
timer := time.NewTimer(0)
|
||||
for {
|
||||
select {
|
||||
case <-timer.C:
|
||||
case <-h.chWakeUp:
|
||||
}
|
||||
|
||||
h.mu.Lock()
|
||||
hlen := h.Len()
|
||||
for i := 0; i < hlen; i++ {
|
||||
entry := &h.entries[0]
|
||||
if !time.Now().Before(entry.ts) {
|
||||
interval := entry.s.update()
|
||||
entry.ts = time.Now().Add(interval)
|
||||
heap.Fix(h, 0)
|
||||
} else {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if hlen > 0 {
|
||||
timer.Reset(h.entries[0].ts.Sub(time.Now()))
|
||||
}
|
||||
h.mu.Unlock()
|
||||
}
|
||||
}
|
16 vendor/github.com/golang/snappy/.gitignore generated vendored
@@ -1,16 +0,0 @@
cmd/snappytool/snappytool
testdata/bench

# These explicitly listed benchmark data files are for an obsolete version of
# snappy_test.go.
testdata/alice29.txt
testdata/asyoulik.txt
testdata/fireworks.jpeg
testdata/geo.protodata
testdata/html
testdata/html_x_4
testdata/kppkn.gtb
testdata/lcet10.txt
testdata/paper-100k.pdf
testdata/plrabn12.txt
testdata/urls.10K
15 vendor/github.com/golang/snappy/AUTHORS generated vendored
@@ -1,15 +0,0 @@
|
||||
# This is the official list of Snappy-Go authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Damian Gryski <dgryski@gmail.com>
|
||||
Google Inc.
|
||||
Jan Mercl <0xjnml@gmail.com>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Sebastien Binet <seb.binet@gmail.com>
|
37 vendor/github.com/golang/snappy/CONTRIBUTORS generated vendored
@@ -1,37 +0,0 @@
|
||||
# This is the official list of people who can contribute
|
||||
# (and typically have contributed) code to the Snappy-Go repository.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# The submission process automatically checks to make sure
|
||||
# that people submitting code are listed in this file (by email address).
|
||||
#
|
||||
# Names should be added to this file only after verifying that
|
||||
# the individual or the individual's organization has agreed to
|
||||
# the appropriate Contributor License Agreement, found here:
|
||||
#
|
||||
# http://code.google.com/legal/individual-cla-v1.0.html
|
||||
# http://code.google.com/legal/corporate-cla-v1.0.html
|
||||
#
|
||||
# The agreement for individuals can be filled out on the web.
|
||||
#
|
||||
# When adding J Random Contributor's name to this file,
|
||||
# either J's name or J's organization's name should be
|
||||
# added to the AUTHORS file, depending on whether the
|
||||
# individual or corporate CLA was used.
|
||||
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Damian Gryski <dgryski@gmail.com>
|
||||
Jan Mercl <0xjnml@gmail.com>
|
||||
Kai Backman <kaib@golang.org>
|
||||
Marc-Antoine Ruel <maruel@chromium.org>
|
||||
Nigel Tao <nigeltao@golang.org>
|
||||
Rob Pike <r@golang.org>
|
||||
Rodolfo Carvalho <rhcarvalho@gmail.com>
|
||||
Russ Cox <rsc@golang.org>
|
||||
Sebastien Binet <seb.binet@gmail.com>
|
27 vendor/github.com/golang/snappy/LICENSE generated vendored
@@ -1,27 +0,0 @@
|
||||
Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
107 vendor/github.com/golang/snappy/README generated vendored
@@ -1,107 +0,0 @@
|
||||
The Snappy compression format in the Go programming language.
|
||||
|
||||
To download and install from source:
|
||||
$ go get github.com/golang/snappy
|
||||
|
||||
Unless otherwise noted, the Snappy-Go source files are distributed
|
||||
under the BSD-style license found in the LICENSE file.
|
||||
|
||||
|
||||
|
||||
Benchmarks.
|
||||
|
||||
The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
|
||||
or so files, the same set used by the C++ Snappy code (github.com/google/snappy
|
||||
and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
|
||||
3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
|
||||
|
||||
"go test -test.bench=."
|
||||
|
||||
_UFlat0-8 2.19GB/s ± 0% html
|
||||
_UFlat1-8 1.41GB/s ± 0% urls
|
||||
_UFlat2-8 23.5GB/s ± 2% jpg
|
||||
_UFlat3-8 1.91GB/s ± 0% jpg_200
|
||||
_UFlat4-8 14.0GB/s ± 1% pdf
|
||||
_UFlat5-8 1.97GB/s ± 0% html4
|
||||
_UFlat6-8 814MB/s ± 0% txt1
|
||||
_UFlat7-8 785MB/s ± 0% txt2
|
||||
_UFlat8-8 857MB/s ± 0% txt3
|
||||
_UFlat9-8 719MB/s ± 1% txt4
|
||||
_UFlat10-8 2.84GB/s ± 0% pb
|
||||
_UFlat11-8 1.05GB/s ± 0% gaviota
|
||||
|
||||
_ZFlat0-8 1.04GB/s ± 0% html
|
||||
_ZFlat1-8 534MB/s ± 0% urls
|
||||
_ZFlat2-8 15.7GB/s ± 1% jpg
|
||||
_ZFlat3-8 740MB/s ± 3% jpg_200
|
||||
_ZFlat4-8 9.20GB/s ± 1% pdf
|
||||
_ZFlat5-8 991MB/s ± 0% html4
|
||||
_ZFlat6-8 379MB/s ± 0% txt1
|
||||
_ZFlat7-8 352MB/s ± 0% txt2
|
||||
_ZFlat8-8 396MB/s ± 1% txt3
|
||||
_ZFlat9-8 327MB/s ± 1% txt4
|
||||
_ZFlat10-8 1.33GB/s ± 1% pb
|
||||
_ZFlat11-8 605MB/s ± 1% gaviota
|
||||
|
||||
|
||||
|
||||
"go test -test.bench=. -tags=noasm"
|
||||
|
||||
_UFlat0-8 621MB/s ± 2% html
|
||||
_UFlat1-8 494MB/s ± 1% urls
|
||||
_UFlat2-8 23.2GB/s ± 1% jpg
|
||||
_UFlat3-8 1.12GB/s ± 1% jpg_200
|
||||
_UFlat4-8 4.35GB/s ± 1% pdf
|
||||
_UFlat5-8 609MB/s ± 0% html4
|
||||
_UFlat6-8 296MB/s ± 0% txt1
|
||||
_UFlat7-8 288MB/s ± 0% txt2
|
||||
_UFlat8-8 309MB/s ± 1% txt3
|
||||
_UFlat9-8 280MB/s ± 1% txt4
|
||||
_UFlat10-8 753MB/s ± 0% pb
|
||||
_UFlat11-8 400MB/s ± 0% gaviota
|
||||
|
||||
_ZFlat0-8 409MB/s ± 1% html
|
||||
_ZFlat1-8 250MB/s ± 1% urls
|
||||
_ZFlat2-8 12.3GB/s ± 1% jpg
|
||||
_ZFlat3-8 132MB/s ± 0% jpg_200
|
||||
_ZFlat4-8 2.92GB/s ± 0% pdf
|
||||
_ZFlat5-8 405MB/s ± 1% html4
|
||||
_ZFlat6-8 179MB/s ± 1% txt1
|
||||
_ZFlat7-8 170MB/s ± 1% txt2
|
||||
_ZFlat8-8 189MB/s ± 1% txt3
|
||||
_ZFlat9-8 164MB/s ± 1% txt4
|
||||
_ZFlat10-8 479MB/s ± 1% pb
|
||||
_ZFlat11-8 270MB/s ± 1% gaviota
|
||||
|
||||
|
||||
|
||||
For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
|
||||
are the numbers from C++ Snappy's
|
||||
|
||||
make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
|
||||
|
||||
BM_UFlat/0 2.4GB/s html
|
||||
BM_UFlat/1 1.4GB/s urls
|
||||
BM_UFlat/2 21.8GB/s jpg
|
||||
BM_UFlat/3 1.5GB/s jpg_200
|
||||
BM_UFlat/4 13.3GB/s pdf
|
||||
BM_UFlat/5 2.1GB/s html4
|
||||
BM_UFlat/6 1.0GB/s txt1
|
||||
BM_UFlat/7 959.4MB/s txt2
|
||||
BM_UFlat/8 1.0GB/s txt3
|
||||
BM_UFlat/9 864.5MB/s txt4
|
||||
BM_UFlat/10 2.9GB/s pb
|
||||
BM_UFlat/11 1.2GB/s gaviota
|
||||
|
||||
BM_ZFlat/0 944.3MB/s html (22.31 %)
|
||||
BM_ZFlat/1 501.6MB/s urls (47.78 %)
|
||||
BM_ZFlat/2 14.3GB/s jpg (99.95 %)
|
||||
BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
|
||||
BM_ZFlat/4 8.3GB/s pdf (83.30 %)
|
||||
BM_ZFlat/5 903.5MB/s html4 (22.52 %)
|
||||
BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
|
||||
BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
|
||||
BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
|
||||
BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
|
||||
BM_ZFlat/10 1.2GB/s pb (19.68 %)
|
||||
BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
|
237 vendor/github.com/golang/snappy/decode.go generated vendored
@@ -1,237 +0,0 @@
|
||||
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package snappy
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrCorrupt reports that the input is invalid.
|
||||
ErrCorrupt = errors.New("snappy: corrupt input")
|
||||
// ErrTooLarge reports that the uncompressed length is too large.
|
||||
ErrTooLarge = errors.New("snappy: decoded block is too large")
|
||||
// ErrUnsupported reports that the input isn't supported.
|
||||
ErrUnsupported = errors.New("snappy: unsupported input")
|
||||
|
||||
errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
|
||||
)
|
||||
|
||||
// DecodedLen returns the length of the decoded block.
|
||||
func DecodedLen(src []byte) (int, error) {
|
||||
v, _, err := decodedLen(src)
|
||||
return v, err
|
||||
}
|
||||
|
||||
// decodedLen returns the length of the decoded block and the number of bytes
|
||||
// that the length header occupied.
|
||||
func decodedLen(src []byte) (blockLen, headerLen int, err error) {
|
||||
v, n := binary.Uvarint(src)
|
||||
if n <= 0 || v > 0xffffffff {
|
||||
return 0, 0, ErrCorrupt
|
||||
}
|
||||
|
||||
const wordSize = 32 << (^uint(0) >> 32 & 1)
|
||||
if wordSize == 32 && v > 0x7fffffff {
|
||||
return 0, 0, ErrTooLarge
|
||||
}
|
||||
return int(v), n, nil
|
||||
}
|
||||
|
||||
const (
|
||||
decodeErrCodeCorrupt = 1
|
||||
decodeErrCodeUnsupportedLiteralLength = 2
|
||||
)
|
||||
|
||||
// Decode returns the decoded form of src. The returned slice may be a sub-
|
||||
// slice of dst if dst was large enough to hold the entire decoded block.
|
||||
// Otherwise, a newly allocated slice will be returned.
|
||||
//
|
||||
// The dst and src must not overlap. It is valid to pass a nil dst.
|
||||
func Decode(dst, src []byte) ([]byte, error) {
|
||||
dLen, s, err := decodedLen(src)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if dLen <= len(dst) {
|
||||
dst = dst[:dLen]
|
||||
} else {
|
||||
dst = make([]byte, dLen)
|
||||
}
|
||||
switch decode(dst, src[s:]) {
|
||||
case 0:
|
||||
return dst, nil
|
||||
case decodeErrCodeUnsupportedLiteralLength:
|
||||
return nil, errUnsupportedLiteralLength
|
||||
}
|
||||
return nil, ErrCorrupt
|
||||
}
|
||||
|
||||
// NewReader returns a new Reader that decompresses from r, using the framing
|
||||
// format described at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
func NewReader(r io.Reader) *Reader {
|
||||
return &Reader{
|
||||
r: r,
|
||||
decoded: make([]byte, maxBlockSize),
|
||||
buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Reader is an io.Reader that can read Snappy-compressed bytes.
|
||||
type Reader struct {
|
||||
r io.Reader
|
||||
err error
|
||||
decoded []byte
|
||||
buf []byte
|
||||
// decoded[i:j] contains decoded bytes that have not yet been passed on.
|
||||
i, j int
|
||||
readHeader bool
|
||||
}
|
||||
|
||||
// Reset discards any buffered data, resets all state, and switches the Snappy
|
||||
// reader to read from r. This permits reusing a Reader rather than allocating
|
||||
// a new one.
|
||||
func (r *Reader) Reset(reader io.Reader) {
|
||||
r.r = reader
|
||||
r.err = nil
|
||||
r.i = 0
|
||||
r.j = 0
|
||||
r.readHeader = false
|
||||
}
|
||||
|
||||
func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
|
||||
if _, r.err = io.ReadFull(r.r, p); r.err != nil {
|
||||
if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
|
||||
r.err = ErrCorrupt
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Read satisfies the io.Reader interface.
|
||||
func (r *Reader) Read(p []byte) (int, error) {
|
||||
if r.err != nil {
|
||||
return 0, r.err
|
||||
}
|
||||
for {
|
||||
if r.i < r.j {
|
||||
n := copy(p, r.decoded[r.i:r.j])
|
||||
r.i += n
|
||||
return n, nil
|
||||
}
|
||||
if !r.readFull(r.buf[:4], true) {
|
||||
return 0, r.err
|
||||
}
|
||||
chunkType := r.buf[0]
|
||||
if !r.readHeader {
|
||||
if chunkType != chunkTypeStreamIdentifier {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.readHeader = true
|
||||
}
|
||||
chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
|
||||
if chunkLen > len(r.buf) {
|
||||
r.err = ErrUnsupported
|
||||
return 0, r.err
|
||||
}
|
||||
|
||||
// The chunk types are specified at
|
||||
// https://github.com/google/snappy/blob/master/framing_format.txt
|
||||
switch chunkType {
|
||||
case chunkTypeCompressedData:
|
||||
// Section 4.2. Compressed data (chunk type 0x00).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
buf := r.buf[:chunkLen]
|
||||
if !r.readFull(buf, false) {
|
||||
return 0, r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
buf = buf[checksumSize:]
|
||||
|
||||
n, err := DecodedLen(buf)
|
||||
if err != nil {
|
||||
r.err = err
|
||||
return 0, r.err
|
||||
}
|
||||
if n > len(r.decoded) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if _, err := Decode(r.decoded, buf); err != nil {
|
||||
r.err = err
|
||||
return 0, r.err
|
||||
}
|
||||
if crc(r.decoded[:n]) != checksum {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.i, r.j = 0, n
|
||||
continue
|
||||
|
||||
case chunkTypeUncompressedData:
|
||||
// Section 4.3. Uncompressed data (chunk type 0x01).
|
||||
if chunkLen < checksumSize {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
buf := r.buf[:checksumSize]
|
||||
if !r.readFull(buf, false) {
|
||||
return 0, r.err
|
||||
}
|
||||
checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
|
||||
// Read directly into r.decoded instead of via r.buf.
|
||||
n := chunkLen - checksumSize
|
||||
if n > len(r.decoded) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if !r.readFull(r.decoded[:n], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
if crc(r.decoded[:n]) != checksum {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
r.i, r.j = 0, n
|
||||
continue
|
||||
|
||||
case chunkTypeStreamIdentifier:
|
||||
// Section 4.1. Stream identifier (chunk type 0xff).
|
||||
if chunkLen != len(magicBody) {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
if !r.readFull(r.buf[:len(magicBody)], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
for i := 0; i < len(magicBody); i++ {
|
||||
if r.buf[i] != magicBody[i] {
|
||||
r.err = ErrCorrupt
|
||||
return 0, r.err
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if chunkType <= 0x7f {
|
||||
// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
|
||||
r.err = ErrUnsupported
|
||||
return 0, r.err
|
||||
}
|
||||
// Section 4.4 Padding (chunk type 0xfe).
|
||||
// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
|
||||
if !r.readFull(r.buf[:chunkLen], false) {
|
||||
return 0, r.err
|
||||
}
|
||||
}
|
||||
}
|
14 vendor/github.com/golang/snappy/decode_amd64.go generated vendored
@@ -1,14 +0,0 @@
// Copyright 2016 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !appengine
// +build gc
// +build !noasm

package snappy

// decode has the same semantics as in decode_other.go.
//
//go:noescape
func decode(dst, src []byte) int
490 vendor/github.com/golang/snappy/decode_amd64.s generated vendored
@@ -1,490 +0,0 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// The asm code generally follows the pure Go code in decode_other.go, except
|
||||
// where marked with a "!!!".
|
||||
|
||||
// func decode(dst, src []byte) int
|
||||
//
|
||||
// All local variables fit into registers. The non-zero stack size is only to
|
||||
// spill registers and push args when issuing a CALL. The register allocation:
|
||||
// - AX scratch
|
||||
// - BX scratch
|
||||
// - CX length or x
|
||||
// - DX offset
|
||||
// - SI &src[s]
|
||||
// - DI &dst[d]
|
||||
// + R8 dst_base
|
||||
// + R9 dst_len
|
||||
// + R10 dst_base + dst_len
|
||||
// + R11 src_base
|
||||
// + R12 src_len
|
||||
// + R13 src_base + src_len
|
||||
// - R14 used by doCopy
|
||||
// - R15 used by doCopy
|
||||
//
|
||||
// The registers R8-R13 (marked with a "+") are set at the start of the
|
||||
// function, and after a CALL returns, and are not otherwise modified.
|
||||
//
|
||||
// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
|
||||
// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
|
||||
TEXT ·decode(SB), NOSPLIT, $48-56
|
||||
// Initialize SI, DI and R8-R13.
|
||||
MOVQ dst_base+0(FP), R8
|
||||
MOVQ dst_len+8(FP), R9
|
||||
MOVQ R8, DI
|
||||
MOVQ R8, R10
|
||||
ADDQ R9, R10
|
||||
MOVQ src_base+24(FP), R11
|
||||
MOVQ src_len+32(FP), R12
|
||||
MOVQ R11, SI
|
||||
MOVQ R11, R13
|
||||
ADDQ R12, R13
|
||||
|
||||
loop:
|
||||
// for s < len(src)
|
||||
CMPQ SI, R13
|
||||
JEQ end
|
||||
|
||||
// CX = uint32(src[s])
|
||||
//
|
||||
// switch src[s] & 0x03
|
||||
MOVBLZX (SI), CX
|
||||
MOVL CX, BX
|
||||
ANDL $3, BX
|
||||
CMPL BX, $1
|
||||
JAE tagCopy
|
||||
|
||||
// ----------------------------------------
|
||||
// The code below handles literal tags.
|
||||
|
||||
// case tagLiteral:
|
||||
// x := uint32(src[s] >> 2)
|
||||
// switch
|
||||
SHRL $2, CX
|
||||
CMPL CX, $60
|
||||
JAE tagLit60Plus
|
||||
|
||||
// case x < 60:
|
||||
// s++
|
||||
INCQ SI
|
||||
|
||||
doLit:
|
||||
// This is the end of the inner "switch", when we have a literal tag.
|
||||
//
|
||||
// We assume that CX == x and x fits in a uint32, where x is the variable
|
||||
// used in the pure Go decode_other.go code.
|
||||
|
||||
// length = int(x) + 1
|
||||
//
|
||||
// Unlike the pure Go code, we don't need to check if length <= 0 because
|
||||
// CX can hold 64 bits, so the increment cannot overflow.
|
||||
INCQ CX
|
||||
|
||||
// Prepare to check if copying length bytes will run past the end of dst or
|
||||
// src.
|
||||
//
|
||||
// AX = len(dst) - d
|
||||
// BX = len(src) - s
|
||||
MOVQ R10, AX
|
||||
SUBQ DI, AX
|
||||
MOVQ R13, BX
|
||||
SUBQ SI, BX
|
||||
|
||||
// !!! Try a faster technique for short (16 or fewer bytes) copies.
|
||||
//
|
||||
// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
|
||||
// goto callMemmove // Fall back on calling runtime·memmove.
|
||||
// }
|
||||
//
|
||||
// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
|
||||
// against 21 instead of 16, because it cannot assume that all of its input
|
||||
// is contiguous in memory and so it needs to leave enough source bytes to
|
||||
// read the next tag without refilling buffers, but Go's Decode assumes
|
||||
// contiguousness (the src argument is a []byte).
|
||||
CMPQ CX, $16
|
||||
JGT callMemmove
|
||||
CMPQ AX, $16
|
||||
JLT callMemmove
|
||||
CMPQ BX, $16
|
||||
JLT callMemmove
|
||||
|
||||
// !!! Implement the copy from src to dst as a 16-byte load and store.
|
||||
// (Decode's documentation says that dst and src must not overlap.)
|
||||
//
|
||||
// This always copies 16 bytes, instead of only length bytes, but that's
|
||||
// OK. If the input is a valid Snappy encoding then subsequent iterations
|
||||
// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
|
||||
// non-nil error), so the overrun will be ignored.
|
||||
//
|
||||
// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
|
||||
// 16-byte loads and stores. This technique probably wouldn't be as
|
||||
// effective on architectures that are fussier about alignment.
|
||||
MOVOU 0(SI), X0
|
||||
MOVOU X0, 0(DI)
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADDQ CX, DI
|
||||
ADDQ CX, SI
|
||||
JMP loop
|
||||
|
||||
callMemmove:
|
||||
// if length > len(dst)-d || length > len(src)-s { etc }
|
||||
CMPQ CX, AX
|
||||
JGT errCorrupt
|
||||
CMPQ CX, BX
|
||||
JGT errCorrupt
|
||||
|
||||
// copy(dst[d:], src[s:s+length])
|
||||
//
|
||||
// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
|
||||
// DI, SI and CX as arguments. Coincidentally, we also need to spill those
|
||||
// three registers to the stack, to save local variables across the CALL.
|
||||
MOVQ DI, 0(SP)
|
||||
MOVQ SI, 8(SP)
|
||||
MOVQ CX, 16(SP)
|
||||
MOVQ DI, 24(SP)
|
||||
MOVQ SI, 32(SP)
|
||||
MOVQ CX, 40(SP)
|
||||
CALL runtime·memmove(SB)
|
||||
|
||||
// Restore local variables: unspill registers from the stack and
|
||||
// re-calculate R8-R13.
|
||||
MOVQ 24(SP), DI
|
||||
MOVQ 32(SP), SI
|
||||
MOVQ 40(SP), CX
|
||||
MOVQ dst_base+0(FP), R8
|
||||
MOVQ dst_len+8(FP), R9
|
||||
MOVQ R8, R10
|
||||
ADDQ R9, R10
|
||||
MOVQ src_base+24(FP), R11
|
||||
MOVQ src_len+32(FP), R12
|
||||
MOVQ R11, R13
|
||||
ADDQ R12, R13
|
||||
|
||||
// d += length
|
||||
// s += length
|
||||
ADDQ CX, DI
|
||||
ADDQ CX, SI
|
||||
JMP loop
|
||||
|
||||
tagLit60Plus:
|
||||
// !!! This fragment does the
|
||||
//
|
||||
// s += x - 58; if uint(s) > uint(len(src)) { etc }
|
||||
//
|
||||
// checks. In the asm version, we code it once instead of once per switch case.
|
||||
ADDQ CX, SI
|
||||
SUBQ $58, SI
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// case x == 60:
|
||||
CMPL CX, $61
|
||||
JEQ tagLit61
|
||||
JA tagLit62Plus
|
||||
|
||||
// x = uint32(src[s-1])
|
||||
MOVBLZX -1(SI), CX
|
||||
JMP doLit
|
||||
|
||||
tagLit61:
|
||||
// case x == 61:
|
||||
// x = uint32(src[s-2]) | uint32(src[s-1])<<8
|
||||
MOVWLZX -2(SI), CX
|
||||
JMP doLit
|
||||
|
||||
tagLit62Plus:
|
||||
CMPL CX, $62
|
||||
JA tagLit63
|
||||
|
||||
// case x == 62:
|
||||
// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
|
||||
MOVWLZX -3(SI), CX
|
||||
MOVBLZX -1(SI), BX
|
||||
SHLL $16, BX
|
||||
ORL BX, CX
|
||||
JMP doLit
|
||||
|
||||
tagLit63:
|
||||
// case x == 63:
|
||||
// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
|
||||
MOVL -4(SI), CX
|
||||
JMP doLit
|
||||
|
||||
// The code above handles literal tags.
|
||||
// ----------------------------------------
|
||||
// The code below handles copy tags.
|
||||
|
||||
tagCopy4:
|
||||
// case tagCopy4:
|
||||
// s += 5
|
||||
ADDQ $5, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// length = 1 + int(src[s-5])>>2
|
||||
SHRQ $2, CX
|
||||
INCQ CX
|
||||
|
||||
// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
|
||||
MOVLQZX -4(SI), DX
|
||||
JMP doCopy
|
||||
|
||||
tagCopy2:
|
||||
// case tagCopy2:
|
||||
// s += 3
|
||||
ADDQ $3, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// length = 1 + int(src[s-3])>>2
|
||||
SHRQ $2, CX
|
||||
INCQ CX
|
||||
|
||||
// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
|
||||
MOVWQZX -2(SI), DX
|
||||
JMP doCopy
|
||||
|
||||
tagCopy:
|
||||
// We have a copy tag. We assume that:
|
||||
// - BX == src[s] & 0x03
|
||||
// - CX == src[s]
|
||||
CMPQ BX, $2
|
||||
JEQ tagCopy2
|
||||
JA tagCopy4
|
||||
|
||||
// case tagCopy1:
|
||||
// s += 2
|
||||
ADDQ $2, SI
|
||||
|
||||
// if uint(s) > uint(len(src)) { etc }
|
||||
MOVQ SI, BX
|
||||
SUBQ R11, BX
|
||||
CMPQ BX, R12
|
||||
JA errCorrupt
|
||||
|
||||
// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
|
||||
MOVQ CX, DX
|
||||
ANDQ $0xe0, DX
|
||||
SHLQ $3, DX
|
||||
MOVBQZX -1(SI), BX
|
||||
ORQ BX, DX
|
||||
|
||||
// length = 4 + int(src[s-2])>>2&0x7
|
||||
SHRQ $2, CX
|
||||
ANDQ $7, CX
|
||||
ADDQ $4, CX
|
||||
|
||||
doCopy:
	// This is the end of the outer "switch", when we have a copy tag.
	//
	// We assume that:
	//  - CX == length && CX > 0
	//  - DX == offset

	// if offset <= 0 { etc }
	CMPQ DX, $0
	JLE errCorrupt

	// if d < offset { etc }
	MOVQ DI, BX
	SUBQ R8, BX
	CMPQ BX, DX
	JLT errCorrupt

	// if length > len(dst)-d { etc }
	MOVQ R10, BX
	SUBQ DI, BX
	CMPQ CX, BX
	JGT errCorrupt

	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
	//
	// Set:
	//  - R14 = len(dst)-d
	//  - R15 = &dst[d-offset]
	MOVQ R10, R14
	SUBQ DI, R14
	MOVQ DI, R15
	SUBQ DX, R15

	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
	//
	// First, try using two 8-byte load/stores, similar to the doLit technique
	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
	// and not one 16-byte load/store, and the first store has to be before the
	// second load, due to the overlap if offset is in the range [8, 16).
	//
	// if length > 16 || offset < 8 || len(dst)-d < 16 {
	//   goto slowForwardCopy
	// }
	// copy 16 bytes
	// d += length
	CMPQ CX, $16
	JGT slowForwardCopy
	CMPQ DX, $8
	JLT slowForwardCopy
	CMPQ R14, $16
	JLT slowForwardCopy
	MOVQ 0(R15), AX
	MOVQ AX, 0(DI)
	MOVQ 8(R15), BX
	MOVQ BX, 8(DI)
	ADDQ CX, DI
	JMP loop
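Before any copying happens, doCopy rejects offsets and lengths that would read before the start of dst or write past its end, and then decides between the 16-byte fast path and the slow path. Here is a safe-Go sketch of that control flow; copyFastPath is an invented name, the real code works on raw pointers in registers rather than slices, and the two slice copies stand in for the paired 8-byte load/stores.

package main

import "fmt"

// copyFastPath models the checks above: a copy is corrupt when offset <= 0,
// when it reaches before the start of dst (d < offset), or when it would run
// past the end (length > len(dst)-d). The fast path applies only when
// length <= 16, offset >= 8 and there are at least 16 bytes of room after d;
// it then issues two 8-byte moves, with the first store completing before the
// second load so that overlap in the offset range [8, 16) still yields the
// right bytes.
func copyFastPath(dst []byte, d, offset, length int) (newD int, tookFastPath, corrupt bool) {
	if offset <= 0 || d < offset || length > len(dst)-d {
		return d, false, true
	}
	if length > 16 || offset < 8 || len(dst)-d < 16 {
		return d, false, false // caller falls back to slowForwardCopy
	}
	copy(dst[d:d+8], dst[d-offset:d-offset+8])
	copy(dst[d+8:d+16], dst[d-offset+8:d-offset+16])
	return d + length, true, false
}

func main() {
	// 8 bytes of data plus 16 bytes of headroom, then copy 8 bytes from
	// offset 8: the output doubles the prefix.
	dst := append([]byte("abcdefgh"), make([]byte, 16)...)
	d, fast, corrupt := copyFastPath(dst, 8, 8, 8)
	fmt.Println(string(dst[:d]), fast, corrupt) // abcdefghabcdefgh true false
}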
slowForwardCopy:
	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
	// can still try 8-byte load stores, provided we can overrun up to 10 extra
	// bytes. As above, the overrun will be fixed up by subsequent iterations
	// of the outermost loop.
	//
	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
	// commentary says:
	//
	// ----
	//
	// The main part of this loop is a simple copy of eight bytes at a time
	// until we've copied (at least) the requested amount of bytes. However,
	// if d and d-offset are less than eight bytes apart (indicating a
	// repeating pattern of length < 8), we first need to expand the pattern in
	// order to get the correct results. For instance, if the buffer looks like
	// this, with the eight-byte <d-offset> and <d> patterns marked as
	// intervals:
	//
	//  abxxxxxxxxxxxx
	//  [------]           d-offset
	//    [------]         d
	//
	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
	// once, after which we can move <d> two bytes without moving <d-offset>:
	//
	//  ababxxxxxxxxxx
	//  [------]           d-offset
	//      [------]       d
	//
	// and repeat the exercise until the two no longer overlap.
	//
	// This allows us to do very well in the special case of one single byte
	// repeated many times, without taking a big hit for more general cases.
	//
	// The worst case of extra writing past the end of the match occurs when
	// offset == 1 and length == 1; the last copy will read from byte positions
	// [0..7] and write to [4..11], whereas it was only supposed to write to
	// position 1. Thus, ten excess bytes.
	//
	// ----
	//
	// That "10 byte overrun" worst case is confirmed by Go's
	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
	// and finishSlowForwardCopy algorithm.
	//
	// if length > len(dst)-d-10 {
	//   goto verySlowForwardCopy
	// }
	SUBQ $10, R14
	CMPQ CX, R14
	JGT verySlowForwardCopy

makeOffsetAtLeast8:
	// !!! As above, expand the pattern so that offset >= 8 and we can use
	// 8-byte load/stores.
	//
	// for offset < 8 {
	//   copy 8 bytes from dst[d-offset:] to dst[d:]
	//   length -= offset
	//   d += offset
	//   offset += offset
	//   // The two previous lines together mean that d-offset, and therefore
	//   // R15, is unchanged.
	// }
	CMPQ DX, $8
	JGE fixUpSlowForwardCopy
	MOVQ (R15), BX
	MOVQ BX, (DI)
	SUBQ DX, CX
	ADDQ DX, DI
	ADDQ DX, DX
	JMP makeOffsetAtLeast8

fixUpSlowForwardCopy:
	// !!! Add length (which might be negative now) to d (implied by DI being
	// &dst[d]) so that d ends up at the right place when we jump back to the
	// top of the loop. Before we do that, though, we save DI to AX so that, if
	// length is positive, copying the remaining length bytes will write to the
	// right place.
	MOVQ DI, AX
	ADDQ CX, DI

finishSlowForwardCopy:
	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
	// length means that we overrun, but as above, that will be fixed up by
	// subsequent iterations of the outermost loop.
	CMPQ CX, $0
	JLE loop
	MOVQ (R15), BX
	MOVQ BX, (AX)
	ADDQ $8, R15
	ADDQ $8, AX
	SUBQ $8, CX
	JMP finishSlowForwardCopy
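Put together, the slow path first widens a short repeating pattern until offset >= 8, then streams 8-byte chunks, accepting up to 10 bytes of overrun. The standalone sketch below models that sequence in safe Go under the same length <= len(dst)-d-10 precondition; slowForwardCopy is an illustrative name here, and the slice copies stand in for the register-based load/stores.

package main

import "fmt"

// slowForwardCopy is a safe-Go model of the makeOffsetAtLeast8 /
// fixUpSlowForwardCopy / finishSlowForwardCopy sequence above. It requires at
// least 10 bytes of slack after d+length in dst, because every step stores a
// full 8-byte chunk and may overrun the copy's end by up to 10 bytes; later
// writes by the decode loop overwrite the excess. It returns d+length, the
// position where d must end up.
func slowForwardCopy(dst []byte, d, offset, length int) int {
	end := d + length
	// Expand a short repeating pattern until source and destination are at
	// least 8 bytes apart. d-offset stays constant through this loop.
	for offset < 8 {
		copy(dst[d:d+8], dst[d-offset:d-offset+8])
		length -= offset
		d += offset
		offset += offset
	}
	// Plain 8-byte chunks until length is used up (possibly going negative).
	for length > 0 {
		copy(dst[d:d+8], dst[d-offset:d-offset+8])
		d += 8
		length -= 8
	}
	return end
}

func main() {
	dst := make([]byte, 32)
	dst[0] = 'a'
	// offset 1, length 20: repeat the single byte dst[0] twenty times, the
	// special case the commentary above calls out.
	d := slowForwardCopy(dst, 1, 1, 20)
	fmt.Println(string(dst[:d])) // 21 consecutive 'a's
}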
verySlowForwardCopy:
	// verySlowForwardCopy is a simple implementation of forward copy. In C
	// parlance, this is a do/while loop instead of a while loop, since we know
	// that length > 0. In Go syntax:
	//
	// for {
	//   dst[d] = dst[d - offset]
	//   d++
	//   length--
	//   if length == 0 {
	//     break
	//   }
	// }
	MOVB (R15), BX
	MOVB BX, (DI)
	INCQ R15
	INCQ DI
	DECQ CX
	JNZ verySlowForwardCopy
	JMP loop

	// The code above handles copy tags.
	// ----------------------------------------

end:
	// This is the end of the "for s < len(src)".
	//
	// if d != len(dst) { etc }
	CMPQ DI, R10
	JNE errCorrupt

	// return 0
	MOVQ $0, ret+48(FP)
	RET

errCorrupt:
	// return decodeErrCodeCorrupt
	MOVQ $1, ret+48(FP)
	RET
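The frame offsets used throughout (dst_base+0, dst_len+8, src_base+24, src_len+32, ret+48) line up with a Go declaration along the lines of func decode(dst, src []byte) int, returning 0 on success and 1 (decodeErrCodeCorrupt) when any errCorrupt branch fires. The sketch below shows how such a return code is typically translated into an error on the Go side; decodeStub, runDecode, and errCorruptInput are assumed names for illustration, and decodeStub merely stands in for the assembly routine.

package main

import (
	"errors"
	"fmt"
)

const decodeErrCodeCorrupt = 1

var errCorruptInput = errors.New("snappy: corrupt input")

// decodeStub stands in for an assembly-implemented decode(dst, src []byte) int.
func decodeStub(dst, src []byte) int {
	if len(src) == 0 { // pretend an empty input is corrupt, purely for the demo
		return decodeErrCodeCorrupt
	}
	copy(dst, src) // not real snappy decoding
	return 0
}

// runDecode translates the assembly-style integer code into a Go error.
func runDecode(dst, src []byte) ([]byte, error) {
	switch decodeStub(dst, src) {
	case 0:
		return dst, nil
	case decodeErrCodeCorrupt:
		return nil, errCorruptInput
	}
	return nil, errors.New("snappy: unknown decode error code")
}

func main() {
	dst := make([]byte, 5)
	out, err := runDecode(dst, []byte("hello"))
	fmt.Println(string(out), err) // hello <nil>
	_, err = runDecode(dst, nil)
	fmt.Println(err) // snappy: corrupt input
}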
Some files were not shown because too many files have changed in this diff.