From bf02777d958bf41f2f51e2e5548f73a9cb3e7478 Mon Sep 17 00:00:00 2001 From: Asara Date: Fri, 29 Nov 2024 14:44:33 -0500 Subject: [PATCH] replace lightningpub with alby --- alby/well-known.go | 136 +++++ config/config.go | 2 + go.mod | 5 +- go.sum | 4 + lightningpub/well-known.go | 87 --- main.go | 6 +- nostr/policies.go | 8 +- sample.env | 1 + vendor/github.com/cespare/xxhash/LICENSE.txt | 22 - vendor/github.com/cespare/xxhash/README.md | 50 -- vendor/github.com/cespare/xxhash/rotate.go | 14 - vendor/github.com/cespare/xxhash/rotate19.go | 14 - vendor/github.com/cespare/xxhash/xxhash.go | 168 ------ .../github.com/cespare/xxhash/xxhash_amd64.go | 12 - .../github.com/cespare/xxhash/xxhash_amd64.s | 233 -------- .../github.com/cespare/xxhash/xxhash_other.go | 75 --- .../github.com/cespare/xxhash/xxhash_safe.go | 10 - .../cespare/xxhash/xxhash_unsafe.go | 30 -- vendor/github.com/davecgh/go-spew/LICENSE | 15 + .../github.com/davecgh/go-spew/spew/bypass.go | 145 +++++ .../davecgh/go-spew/spew/bypasssafe.go | 38 ++ .../github.com/davecgh/go-spew/spew/common.go | 341 ++++++++++++ .../github.com/davecgh/go-spew/spew/config.go | 306 +++++++++++ vendor/github.com/davecgh/go-spew/spew/doc.go | 211 ++++++++ .../github.com/davecgh/go-spew/spew/dump.go | 509 ++++++++++++++++++ .../github.com/davecgh/go-spew/spew/format.go | 419 ++++++++++++++ .../github.com/davecgh/go-spew/spew/spew.go | 148 +++++ vendor/github.com/fiatjaf/khatru/nip86.go | 14 +- .../greatroar/blobloom/.gitattributes | 2 - .../greatroar/blobloom/.golangci.yml | 25 - vendor/github.com/greatroar/blobloom/LICENSE | 202 ------- .../github.com/greatroar/blobloom/README.md | 86 --- .../greatroar/blobloom/bloomfilter.go | 279 ---------- vendor/github.com/greatroar/blobloom/io.go | 246 --------- .../github.com/greatroar/blobloom/optimize.go | 201 ------- .../greatroar/blobloom/setop_64bit.go | 148 ----- .../greatroar/blobloom/setop_other.go | 115 ---- vendor/github.com/greatroar/blobloom/sync.go | 145 ----- vendor/github.com/greatroar/blobloom/test.sh | 16 - vendor/github.com/nbd-wtf/go-nostr/kinds.go | 2 +- .../nbd-wtf/go-nostr/nip77/nip77.go | 12 +- vendor/github.com/nbd-wtf/go-nostr/relay.go | 12 +- vendor/modules.txt | 9 +- 43 files changed, 2313 insertions(+), 2210 deletions(-) create mode 100644 alby/well-known.go delete mode 100644 lightningpub/well-known.go delete mode 100644 vendor/github.com/cespare/xxhash/LICENSE.txt delete mode 100644 vendor/github.com/cespare/xxhash/README.md delete mode 100644 vendor/github.com/cespare/xxhash/rotate.go delete mode 100644 vendor/github.com/cespare/xxhash/rotate19.go delete mode 100644 vendor/github.com/cespare/xxhash/xxhash.go delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_amd64.go delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_amd64.s delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_other.go delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_safe.go delete mode 100644 vendor/github.com/cespare/xxhash/xxhash_unsafe.go create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go create mode 
100644 vendor/github.com/davecgh/go-spew/spew/spew.go delete mode 100644 vendor/github.com/greatroar/blobloom/.gitattributes delete mode 100644 vendor/github.com/greatroar/blobloom/.golangci.yml delete mode 100644 vendor/github.com/greatroar/blobloom/LICENSE delete mode 100644 vendor/github.com/greatroar/blobloom/README.md delete mode 100644 vendor/github.com/greatroar/blobloom/bloomfilter.go delete mode 100644 vendor/github.com/greatroar/blobloom/io.go delete mode 100644 vendor/github.com/greatroar/blobloom/optimize.go delete mode 100644 vendor/github.com/greatroar/blobloom/setop_64bit.go delete mode 100644 vendor/github.com/greatroar/blobloom/setop_other.go delete mode 100644 vendor/github.com/greatroar/blobloom/sync.go delete mode 100644 vendor/github.com/greatroar/blobloom/test.sh diff --git a/alby/well-known.go b/alby/well-known.go new file mode 100644 index 0000000..54a9714 --- /dev/null +++ b/alby/well-known.go @@ -0,0 +1,136 @@ +package alby + +import ( + "encoding/json" + "fmt" + "net" + "net/http" + + "git.devvul.com/asara/gologger" + "git.devvul.com/asara/well-goknown/config" + "github.com/davecgh/go-spew/spew" +) + +type AlbyApps []AlbyApp + +type AlbyApp struct { + Id int32 `json:"id"` + Name string `json:"name"` + NostrPubkey string `json:"nostrPubkey"` +} + +type lnurlp struct { + Status string `json:"status"` + Tag string `json:"tag"` + CommentAllowed int32 `json:"commentAllowed"` + Callback string `json:"callback"` + MinSendable int64 `json:"minSendable"` + MaxSendable int64 `json:"maxSendable"` + Metadata string `json:"metadata"` + AllowsNostr bool `json:"allowsNostr"` + NostrPubkey string `json:"nostrPubkey"` +} + +type lnurlpError struct { + Status string `json:"status"` + Reason string `json:"reason"` +} + +func GetLnurlp(w http.ResponseWriter, r *http.Request) { + l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger() + albyAdmin := config.GetConfig().AlbyAdminAuth + + // setup response type + w.Header().Set("Content-Type", "application/json") + + // normalize domain + domain, _, err := net.SplitHostPort(r.Host) + if err != nil { + domain = r.Host + } + + name := r.PathValue("name") + + // get all alby apps + client := http.Client{} + req, err := http.NewRequest("GET", "https://alby.devvul.com/api/apps", nil) + if err != nil { + l.Debug().Msgf("unable to generate alby request for %s@%s: %s", name, domain, err.Error()) + lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "unknown error"} + retError, _ := json.Marshal(lnurlpReturnError) + w.WriteHeader(http.StatusNotFound) + w.Write(retError) + return + } + + req.Header = http.Header{"Authorization": {fmt.Sprintf("Bearer %s", albyAdmin)}} + resp, err := client.Do(req) + defer resp.Body.Close() + + var albyApps AlbyApps + err = json.NewDecoder(resp.Body).Decode(&albyApps) + if err != nil { + l.Debug().Msgf("unable to unmarshal alby request for %s@%s: %s", name, domain, err.Error()) + lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "unknown error"} + retError, _ := json.Marshal(lnurlpReturnError) + w.WriteHeader(http.StatusNotFound) + w.Write(retError) + return + } + + // check if user exists + var npk string + for _, element := range albyApps { + if element.Name == name { + npk = element.NostrPubkey + } + } + + if len(npk) == 0 { + l.Debug().Msgf("user doesn't exist in alby %s@%s: %s", name, domain, err.Error()) + lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "user does not exist"} + retError, _ := json.Marshal(lnurlpReturnError) + w.WriteHeader(http.StatusNotFound) + 
w.Write(retError) + return + } + + lnurlpReturn := &lnurlp{ + Status: "OK", + Tag: "payRequest", + CommentAllowed: 255, + Callback: fmt.Sprintf("https://%s/.well-known/lnurlp/%s/callback", domain, name), + MinSendable: 1000, + MaxSendable: 10000000, + Metadata: fmt.Sprintf("[[\"text/plain\", \"ln address payment to %s on the devvul server\"],[\"text/identifier\", \"%s@%s\"]]", name, name, domain), + AllowsNostr: true, + NostrPubkey: npk, + } + + ret, err := json.Marshal(lnurlpReturn) + if err != nil { + l.Debug().Msgf("unable to marshal json for %s@%s: %s", name, domain, err.Error()) + lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "User not found"} + retError, _ := json.Marshal(lnurlpReturnError) + w.WriteHeader(http.StatusNotFound) + w.Write(retError) + return + } + + l.Debug().Msgf("returning lnwallet for %s@%s", name, domain) + w.WriteHeader(http.StatusOK) + w.Write(ret) + return +} + +func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) { + // l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger() + // normalize domain + //domain, _, err := net.SplitHostPort(r.Host) + //if err != nil { + // domain = r.Host + //} + //name := r.PathValue("name") + + spew.Dump(r.URL.Query()) +} diff --git a/config/config.go b/config/config.go index 77c7f25..f846ec7 100644 --- a/config/config.go +++ b/config/config.go @@ -17,6 +17,7 @@ type ( RelayDescription string RelayIcon string RelayContact string + AlbyAdminAuth string } ) @@ -33,6 +34,7 @@ func GetConfig() Config { RelayDescription: getEnv("RELAY_DESCRIPTION", ""), RelayIcon: getEnv("RELAY_ICON", ""), RelayContact: getEnv("RELAY_CONTACT", ""), + AlbyAdminAuth: getEnv("ALBY_ADMIN_AUTH", ""), } } diff --git a/go.mod b/go.mod index 390b255..1116b6e 100644 --- a/go.mod +++ b/go.mod @@ -4,11 +4,12 @@ go 1.23.3 require ( git.devvul.com/asara/gologger v0.9.0 + github.com/davecgh/go-spew v1.1.1 github.com/fiatjaf/eventstore v0.14.0 - github.com/fiatjaf/khatru v0.12.0 + github.com/fiatjaf/khatru v0.12.1 github.com/jmoiron/sqlx v1.4.0 github.com/lib/pq v1.10.9 - github.com/nbd-wtf/go-nostr v0.42.2 + github.com/nbd-wtf/go-nostr v0.42.3 ) require ( diff --git a/go.sum b/go.sum index 4c54bae..abef854 100644 --- a/go.sum +++ b/go.sum @@ -28,6 +28,8 @@ github.com/fiatjaf/eventstore v0.14.0 h1:eAyugJGFRCrXYJLCc2nC/BIApmBbQN/Z4dxvNz1 github.com/fiatjaf/eventstore v0.14.0/go.mod h1:XOl5B6WGBX1a0ww6s3WT94QVOmye/6zDTtyWHVtHQ5U= github.com/fiatjaf/khatru v0.12.0 h1:pOWyahXl9UoyFTj/tX4Y3eM8nqGRHwMqM4F8ed7O3A0= github.com/fiatjaf/khatru v0.12.0/go.mod h1:GfKKAR27sMxBmepv709QnL7C9lEmlhaj41LFm/ueATc= +github.com/fiatjaf/khatru v0.12.1 h1:J7GlQy/Be0nAXH9JdS9jVMv2JdwLQhSu7TK3ZbiFZh4= +github.com/fiatjaf/khatru v0.12.1/go.mod h1:GfKKAR27sMxBmepv709QnL7C9lEmlhaj41LFm/ueATc= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= @@ -59,6 +61,8 @@ github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/nbd-wtf/go-nostr v0.42.2 h1:X8vpfLutvmyxqjsroKPHdIyPliNa6sYD8+CA0kDVySw= github.com/nbd-wtf/go-nostr v0.42.2/go.mod h1:FBa4FBJO7NuANvkeKSlrf0BIyxGufmrUbuelr6Q4Ick= +github.com/nbd-wtf/go-nostr v0.42.3 h1:wimwmXLhF9ScrNTG4by3eSj2p7HUGkLUospX4bHjxQk= +github.com/nbd-wtf/go-nostr v0.42.3/go.mod 
h1:p29g9i1UiSBKdyXkNa6V8rFqE+wrIn4UY0Emabwdu6A= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= diff --git a/lightningpub/well-known.go b/lightningpub/well-known.go deleted file mode 100644 index 5c441b7..0000000 --- a/lightningpub/well-known.go +++ /dev/null @@ -1,87 +0,0 @@ -package lightningpub - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - - "git.devvul.com/asara/gologger" - "git.devvul.com/asara/well-goknown/config" - "github.com/jmoiron/sqlx" -) - -var ( - DB *sqlx.DB -) - -type lnurlp struct { - Tag string `json:"tag"` - Callback string `json:"callback"` - MaxSendable int64 `json:"maxSendable"` - MinSendable int64 `json:"minSendable"` - Metadata string `json:"metadata"` - AllowsNostr bool `json:"allowsNostr"` - NostrPubkey string `json:"nostrPubkey"` -} - -func GetLnurlp(w http.ResponseWriter, r *http.Request) { - l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger() - - // normalize domain - domain, _, err := net.SplitHostPort(r.Host) - if err != nil { - domain = r.Host - } - - name := r.PathValue("name") - var lnwallet string - err = DB.QueryRow("SELECT wallet FROM lnwallets WHERE name=$1 AND domain=$2", name, domain).Scan(&lnwallet) - if err != nil { - l.Debug().Msgf("user (%s@%s) doesn't exist: %s", name, domain, err.Error()) - http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) - return - } - - //upstreamUrl := fmt.Sprintf("https://%s/api/guest/lnurl_pay/info?k1=%s", domain, lnwallet) - upstreamUrl := fmt.Sprintf("https://%s/api/guest/lnurl_pay/info?k1=%s", domain, lnwallet) - upstreamPayload, err := http.Get(upstreamUrl) - if err != nil { - l.Debug().Msgf("user (%s@%s) doesn't exist: %s", name, domain, err.Error()) - http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) - return - } - - defer upstreamPayload.Body.Close() - body, err := ioutil.ReadAll(upstreamPayload.Body) - if err != nil { - l.Debug().Msgf("user (%s@%s) doesn't exist: %s", name, domain, err.Error()) - http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) - return - } - - lnurlpReturn := lnurlp{} - err = json.Unmarshal(body, &lnurlpReturn) - if err != nil { - l.Debug().Msgf("user (%s@%s) doesn't exist: %s", name, domain, err.Error()) - http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) - return - } - - m := fmt.Sprintf("[[\"text/plain\", \"ln address payment to a user on the devvul server\"],[\"text/identifier\", \"%s@%s\"]]", name, domain) - lnurlpReturn.Metadata = m - - ret, err := json.Marshal(lnurlpReturn) - if err != nil { - l.Debug().Msgf("user (%s@%s) doesn't exist: %s", name, domain, err.Error()) - http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound) - return - } - - l.Debug().Msgf("returning lnwallet for %s@%s", name, domain) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - w.Write(ret) - return -} diff --git a/main.go b/main.go index 9f8c206..f8e5e73 100644 --- a/main.go +++ b/main.go @@ -4,9 +4,9 @@ import ( "net/http" "git.devvul.com/asara/gologger" + "git.devvul.com/asara/well-goknown/alby" "git.devvul.com/asara/well-goknown/config" "git.devvul.com/asara/well-goknown/db" - "git.devvul.com/asara/well-goknown/lightningpub" "git.devvul.com/asara/well-goknown/matrix" "git.devvul.com/asara/well-goknown/nostr" 
"github.com/fiatjaf/eventstore/postgresql" @@ -22,7 +22,6 @@ func main() { db, _ := db.NewDB() defer db.Close() - lightningpub.DB = db nostr.DB = db nostr.RelayDb = postgresql.PostgresBackend{DatabaseURL: config.GetConfig().DbUrl} if err := nostr.RelayDb.Init(); err != nil { @@ -42,7 +41,8 @@ func main() { // lnurlp endpoint l.Debug().Msg("enabling lnurlp well-known endpoint") - http.HandleFunc("/.well-known/lnurlp/{name}", lightningpub.GetLnurlp) + http.HandleFunc("/.well-known/lnurlp/{name}", alby.GetLnurlp) + http.HandleFunc("/.well-known/lnurlp/{name}/callback", alby.GetLnurlpCallback) // start server port := config.GetConfig().ListenAddr diff --git a/nostr/policies.go b/nostr/policies.go index 062b84d..4e1e567 100644 --- a/nostr/policies.go +++ b/nostr/policies.go @@ -15,12 +15,16 @@ func RejectUnregisteredNpubs(ctx context.Context, event *nostr.Event) (reject bo // always allow the following kinds // 13: nip-59 seals + // 9734: nip-57 zap request // 9735: nip-57 zap receipt - // 21000: lightning.pub rpc + // 13194: nip-47 info event // 22242: nip-42 client auth + // 23194: nip-47 request + // 23195: nip-47 response + // 23196: nip-47 notification // 30078: nip-78 addressable events switch event.Kind { - case 13, 9735, 21000, 22242, 30078: + case 13, 9734, 9735, 13194, 22242, 23194, 23195, 23196, 30078: return false, "" } diff --git a/sample.env b/sample.env index dcbc1b6..bbdf196 100644 --- a/sample.env +++ b/sample.env @@ -8,3 +8,4 @@ export RELAY_DESCRIPTION="nostr relay running via git.devvul.com/asara/well-gokn export RELAY_ICON="" export RELAY_NAME="Nostr Relay" export RELAY_PUBKEY="" +export ALBY_ADMIN_AUTH="" diff --git a/vendor/github.com/cespare/xxhash/LICENSE.txt b/vendor/github.com/cespare/xxhash/LICENSE.txt deleted file mode 100644 index 24b5306..0000000 --- a/vendor/github.com/cespare/xxhash/LICENSE.txt +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2016 Caleb Spare - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/cespare/xxhash/README.md b/vendor/github.com/cespare/xxhash/README.md deleted file mode 100644 index 0982fd2..0000000 --- a/vendor/github.com/cespare/xxhash/README.md +++ /dev/null @@ -1,50 +0,0 @@ -# xxhash - -[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash) - -xxhash is a Go implementation of the 64-bit -[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a -high-quality hashing algorithm that is much faster than anything in the Go -standard library. 
- -The API is very small, taking its cue from the other hashing packages in the -standard library: - - $ go doc github.com/cespare/xxhash ! - package xxhash // import "github.com/cespare/xxhash" - - Package xxhash implements the 64-bit variant of xxHash (XXH64) as described - at http://cyan4973.github.io/xxHash/. - - func New() hash.Hash64 - func Sum64(b []byte) uint64 - func Sum64String(s string) uint64 - -This implementation provides a fast pure-Go implementation and an even faster -assembly implementation for amd64. - -## Benchmarks - -Here are some quick benchmarks comparing the pure-Go and assembly -implementations of Sum64 against another popular Go XXH64 implementation, -[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash): - -| input size | OneOfOne | cespare (purego) | cespare | -| --- | --- | --- | --- | -| 5 B | 416 MB/s | 720 MB/s | 872 MB/s | -| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s | -| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s | -| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s | - -These numbers were generated with: - -``` -$ go test -benchtime 10s -bench '/OneOfOne,' -$ go test -tags purego -benchtime 10s -bench '/xxhash,' -$ go test -benchtime 10s -bench '/xxhash,' -``` - -## Projects using this package - -- [InfluxDB](https://github.com/influxdata/influxdb) -- [Prometheus](https://github.com/prometheus/prometheus) diff --git a/vendor/github.com/cespare/xxhash/rotate.go b/vendor/github.com/cespare/xxhash/rotate.go deleted file mode 100644 index f3eac5e..0000000 --- a/vendor/github.com/cespare/xxhash/rotate.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !go1.9 - -package xxhash - -// TODO(caleb): After Go 1.10 comes out, remove this fallback code. - -func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) } -func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) } -func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) } -func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) } -func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) } -func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) } -func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) } -func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) } diff --git a/vendor/github.com/cespare/xxhash/rotate19.go b/vendor/github.com/cespare/xxhash/rotate19.go deleted file mode 100644 index b99612b..0000000 --- a/vendor/github.com/cespare/xxhash/rotate19.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build go1.9 - -package xxhash - -import "math/bits" - -func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) } -func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) } -func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) } -func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) } -func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) } -func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) } -func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) } -func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) } diff --git a/vendor/github.com/cespare/xxhash/xxhash.go b/vendor/github.com/cespare/xxhash/xxhash.go deleted file mode 100644 index f896bd2..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash.go +++ /dev/null @@ -1,168 +0,0 @@ -// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described -// at http://cyan4973.github.io/xxHash/. 
-package xxhash - -import ( - "encoding/binary" - "hash" -) - -const ( - prime1 uint64 = 11400714785074694791 - prime2 uint64 = 14029467366897019727 - prime3 uint64 = 1609587929392839161 - prime4 uint64 = 9650029242287828579 - prime5 uint64 = 2870177450012600261 -) - -// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where -// possible in the Go code is worth a small (but measurable) performance boost -// by avoiding some MOVQs. Vars are needed for the asm and also are useful for -// convenience in the Go code in a few places where we need to intentionally -// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the -// result overflows a uint64). -var ( - prime1v = prime1 - prime2v = prime2 - prime3v = prime3 - prime4v = prime4 - prime5v = prime5 -) - -type xxh struct { - v1 uint64 - v2 uint64 - v3 uint64 - v4 uint64 - total int - mem [32]byte - n int // how much of mem is used -} - -// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm. -func New() hash.Hash64 { - var x xxh - x.Reset() - return &x -} - -func (x *xxh) Reset() { - x.n = 0 - x.total = 0 - x.v1 = prime1v + prime2 - x.v2 = prime2 - x.v3 = 0 - x.v4 = -prime1v -} - -func (x *xxh) Size() int { return 8 } -func (x *xxh) BlockSize() int { return 32 } - -// Write adds more data to x. It always returns len(b), nil. -func (x *xxh) Write(b []byte) (n int, err error) { - n = len(b) - x.total += len(b) - - if x.n+len(b) < 32 { - // This new data doesn't even fill the current block. - copy(x.mem[x.n:], b) - x.n += len(b) - return - } - - if x.n > 0 { - // Finish off the partial block. - copy(x.mem[x.n:], b) - x.v1 = round(x.v1, u64(x.mem[0:8])) - x.v2 = round(x.v2, u64(x.mem[8:16])) - x.v3 = round(x.v3, u64(x.mem[16:24])) - x.v4 = round(x.v4, u64(x.mem[24:32])) - b = b[32-x.n:] - x.n = 0 - } - - if len(b) >= 32 { - // One or more full blocks left. - b = writeBlocks(x, b) - } - - // Store any remaining partial block. 
- copy(x.mem[:], b) - x.n = len(b) - - return -} - -func (x *xxh) Sum(b []byte) []byte { - s := x.Sum64() - return append( - b, - byte(s>>56), - byte(s>>48), - byte(s>>40), - byte(s>>32), - byte(s>>24), - byte(s>>16), - byte(s>>8), - byte(s), - ) -} - -func (x *xxh) Sum64() uint64 { - var h uint64 - - if x.total >= 32 { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = x.v3 + prime5 - } - - h += uint64(x.total) - - i, end := 0, x.n - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(x.mem[i:i+8])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(x.mem[i:i+4])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for i < end { - h ^= uint64(x.mem[i]) * prime5 - h = rol11(h) * prime1 - i++ - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) } -func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) } - -func round(acc, input uint64) uint64 { - acc += input * prime2 - acc = rol31(acc) - acc *= prime1 - return acc -} - -func mergeRound(acc, val uint64) uint64 { - val = round(0, val) - acc ^= val - acc = acc*prime1 + prime4 - return acc -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/xxhash_amd64.go deleted file mode 100644 index d617652..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -// -//go:noescape -func Sum64(b []byte) uint64 - -func writeBlocks(x *xxh, b []byte) []byte diff --git a/vendor/github.com/cespare/xxhash/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/xxhash_amd64.s deleted file mode 100644 index 757f201..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash_amd64.s +++ /dev/null @@ -1,233 +0,0 @@ -// +build !appengine -// +build gc -// +build !purego - -#include "textflag.h" - -// Register allocation: -// AX h -// CX pointer to advance through b -// DX n -// BX loop end -// R8 v1, k1 -// R9 v2 -// R10 v3 -// R11 v4 -// R12 tmp -// R13 prime1v -// R14 prime2v -// R15 prime4v - -// round reads from and advances the buffer pointer in CX. -// It assumes that R13 has prime1v and R14 has prime2v. -#define round(r) \ - MOVQ (CX), R12 \ - ADDQ $8, CX \ - IMULQ R14, R12 \ - ADDQ R12, r \ - ROLQ $31, r \ - IMULQ R13, r - -// mergeRound applies a merge round on the two registers acc and val. -// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v. -#define mergeRound(acc, val) \ - IMULQ R14, val \ - ROLQ $31, val \ - IMULQ R13, val \ - XORQ val, acc \ - IMULQ R13, acc \ - ADDQ R15, acc - -// func Sum64(b []byte) uint64 -TEXT ·Sum64(SB), NOSPLIT, $0-32 - // Load fixed primes. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - MOVQ ·prime4v(SB), R15 - - // Load slice. - MOVQ b_base+0(FP), CX - MOVQ b_len+8(FP), DX - LEAQ (CX)(DX*1), BX - - // The first loop limit will be len(b)-32. - SUBQ $32, BX - - // Check whether we have at least one block. - CMPQ DX, $32 - JLT noBlocks - - // Set up initial state (v1, v2, v3, v4). - MOVQ R13, R8 - ADDQ R14, R8 - MOVQ R14, R9 - XORQ R10, R10 - XORQ R11, R11 - SUBQ R13, R11 - - // Loop until CX > BX. 
-blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - MOVQ R8, AX - ROLQ $1, AX - MOVQ R9, R12 - ROLQ $7, R12 - ADDQ R12, AX - MOVQ R10, R12 - ROLQ $12, R12 - ADDQ R12, AX - MOVQ R11, R12 - ROLQ $18, R12 - ADDQ R12, AX - - mergeRound(AX, R8) - mergeRound(AX, R9) - mergeRound(AX, R10) - mergeRound(AX, R11) - - JMP afterBlocks - -noBlocks: - MOVQ ·prime5v(SB), AX - -afterBlocks: - ADDQ DX, AX - - // Right now BX has len(b)-32, and we want to loop until CX > len(b)-8. - ADDQ $24, BX - - CMPQ CX, BX - JG fourByte - -wordLoop: - // Calculate k1. - MOVQ (CX), R8 - ADDQ $8, CX - IMULQ R14, R8 - ROLQ $31, R8 - IMULQ R13, R8 - - XORQ R8, AX - ROLQ $27, AX - IMULQ R13, AX - ADDQ R15, AX - - CMPQ CX, BX - JLE wordLoop - -fourByte: - ADDQ $4, BX - CMPQ CX, BX - JG singles - - MOVL (CX), R8 - ADDQ $4, CX - IMULQ R13, R8 - XORQ R8, AX - - ROLQ $23, AX - IMULQ R14, AX - ADDQ ·prime3v(SB), AX - -singles: - ADDQ $4, BX - CMPQ CX, BX - JGE finalize - -singlesLoop: - MOVBQZX (CX), R12 - ADDQ $1, CX - IMULQ ·prime5v(SB), R12 - XORQ R12, AX - - ROLQ $11, AX - IMULQ R13, AX - - CMPQ CX, BX - JL singlesLoop - -finalize: - MOVQ AX, R12 - SHRQ $33, R12 - XORQ R12, AX - IMULQ R14, AX - MOVQ AX, R12 - SHRQ $29, R12 - XORQ R12, AX - IMULQ ·prime3v(SB), AX - MOVQ AX, R12 - SHRQ $32, R12 - XORQ R12, AX - - MOVQ AX, ret+24(FP) - RET - -// writeBlocks uses the same registers as above except that it uses AX to store -// the x pointer. - -// func writeBlocks(x *xxh, b []byte) []byte -TEXT ·writeBlocks(SB), NOSPLIT, $0-56 - // Load fixed primes needed for round. - MOVQ ·prime1v(SB), R13 - MOVQ ·prime2v(SB), R14 - - // Load slice. - MOVQ b_base+8(FP), CX - MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below - MOVQ b_len+16(FP), DX - LEAQ (CX)(DX*1), BX - SUBQ $32, BX - - // Load vN from x. - MOVQ x+0(FP), AX - MOVQ 0(AX), R8 // v1 - MOVQ 8(AX), R9 // v2 - MOVQ 16(AX), R10 // v3 - MOVQ 24(AX), R11 // v4 - - // We don't need to check the loop condition here; this function is - // always called with at least one block of data to process. -blockLoop: - round(R8) - round(R9) - round(R10) - round(R11) - - CMPQ CX, BX - JLE blockLoop - - // Copy vN back to x. - MOVQ R8, 0(AX) - MOVQ R9, 8(AX) - MOVQ R10, 16(AX) - MOVQ R11, 24(AX) - - // Construct return slice. - // NOTE: It's important that we don't construct a slice that has a base - // pointer off the end of the original slice, as in Go 1.7+ this will - // cause runtime crashes. (See discussion in, for example, - // https://github.com/golang/go/issues/16772.) - // Therefore, we calculate the length/cap first, and if they're zero, we - // keep the old base. This is what the compiler does as well if you - // write code like - // b = b[len(b):] - - // New length is 32 - (CX - BX) -> BX+32 - CX. - ADDQ $32, BX - SUBQ CX, BX - JZ afterSetBase - - MOVQ CX, ret_base+32(FP) - -afterSetBase: - MOVQ BX, ret_len+40(FP) - MOVQ BX, ret_cap+48(FP) // set cap == len - - RET diff --git a/vendor/github.com/cespare/xxhash/xxhash_other.go b/vendor/github.com/cespare/xxhash/xxhash_other.go deleted file mode 100644 index c68d13f..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash_other.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build !amd64 appengine !gc purego - -package xxhash - -// Sum64 computes the 64-bit xxHash digest of b. -func Sum64(b []byte) uint64 { - // A simpler version would be - // x := New() - // x.Write(b) - // return x.Sum64() - // but this is faster, particularly for small inputs. 
- - n := len(b) - var h uint64 - - if n >= 32 { - v1 := prime1v + prime2 - v2 := prime2 - v3 := uint64(0) - v4 := -prime1v - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4) - h = mergeRound(h, v1) - h = mergeRound(h, v2) - h = mergeRound(h, v3) - h = mergeRound(h, v4) - } else { - h = prime5 - } - - h += uint64(n) - - i, end := 0, len(b) - for ; i+8 <= end; i += 8 { - k1 := round(0, u64(b[i:i+8:len(b)])) - h ^= k1 - h = rol27(h)*prime1 + prime4 - } - if i+4 <= end { - h ^= uint64(u32(b[i:i+4:len(b)])) * prime1 - h = rol23(h)*prime2 + prime3 - i += 4 - } - for ; i < end; i++ { - h ^= uint64(b[i]) * prime5 - h = rol11(h) * prime1 - } - - h ^= h >> 33 - h *= prime2 - h ^= h >> 29 - h *= prime3 - h ^= h >> 32 - - return h -} - -func writeBlocks(x *xxh, b []byte) []byte { - v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4 - for len(b) >= 32 { - v1 = round(v1, u64(b[0:8:len(b)])) - v2 = round(v2, u64(b[8:16:len(b)])) - v3 = round(v3, u64(b[16:24:len(b)])) - v4 = round(v4, u64(b[24:32:len(b)])) - b = b[32:len(b):len(b)] - } - x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4 - return b -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_safe.go b/vendor/github.com/cespare/xxhash/xxhash_safe.go deleted file mode 100644 index dfa15ab..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash_safe.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build appengine - -// This file contains the safe implementations of otherwise unsafe-using code. - -package xxhash - -// Sum64String computes the 64-bit xxHash digest of s. -func Sum64String(s string) uint64 { - return Sum64([]byte(s)) -} diff --git a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/xxhash_unsafe.go deleted file mode 100644 index d2b64e8..0000000 --- a/vendor/github.com/cespare/xxhash/xxhash_unsafe.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !appengine - -// This file encapsulates usage of unsafe. -// xxhash_safe.go contains the safe implementations. - -package xxhash - -import ( - "reflect" - "unsafe" -) - -// Sum64String computes the 64-bit xxHash digest of s. -// It may be faster than Sum64([]byte(s)) by avoiding a copy. -// -// TODO(caleb): Consider removing this if an optimization is ever added to make -// it unnecessary: https://golang.org/issue/2205. -// -// TODO(caleb): We still have a function call; we could instead write Go/asm -// copies of Sum64 for strings to squeeze out a bit more speed. -func Sum64String(s string) uint64 { - // See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ - // for some discussion about this unsafe conversion. - var b []byte - bh := (*reflect.SliceHeader)(unsafe.Pointer(&b)) - bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data - bh.Len = len(s) - bh.Cap = len(s) - return Sum64(b) -} diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000..bc52e96 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
+ +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 0000000..7929947 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,145 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +type flag uintptr + +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag +) + +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) + +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. 
+func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. + vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } + } + panic("reflect.Value read-only flag has changed semantics") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 0000000..205c28d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. 
+// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 0000000..1be8ce9 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "reflect" + "sort" + "strconv" +) + +// Some constants in the form of bytes to avoid string overhead. This mirrors +// the technique used in the fmt package. +var ( + panicBytes = []byte("(PANIC=") + plusBytes = []byte("+") + iBytes = []byte("i") + trueBytes = []byte("true") + falseBytes = []byte("false") + interfaceBytes = []byte("(interface {})") + commaNewlineBytes = []byte(",\n") + newlineBytes = []byte("\n") + openBraceBytes = []byte("{") + openBraceNewlineBytes = []byte("{\n") + closeBraceBytes = []byte("}") + asteriskBytes = []byte("*") + colonBytes = []byte(":") + colonSpaceBytes = []byte(": ") + openParenBytes = []byte("(") + closeParenBytes = []byte(")") + spaceBytes = []byte(" ") + pointerChainBytes = []byte("->") + nilAngleBytes = []byte("") + maxNewlineBytes = []byte("\n") + maxShortBytes = []byte("") + circularBytes = []byte("") + circularShortBytes = []byte("") + invalidAngleBytes = []byte("") + openBracketBytes = []byte("[") + closeBracketBytes = []byte("]") + percentBytes = []byte("%") + precisionBytes = []byte(".") + openAngleBytes = []byte("<") + closeAngleBytes = []byte(">") + openMapBytes = []byte("map[") + closeMapBytes = []byte("]") + lenEqualsBytes = []byte("len=") + capEqualsBytes = []byte("cap=") +) + +// hexDigits is used to map a decimal value to a hex digit. +var hexDigits = "0123456789abcdef" + +// catchPanic handles any panics that might occur during the handleMethods +// calls. +func catchPanic(w io.Writer, v reflect.Value) { + if err := recover(); err != nil { + w.Write(panicBytes) + fmt.Fprintf(w, "%v", err) + w.Write(closeParenBytes) + } +} + +// handleMethods attempts to call the Error and String methods on the underlying +// type the passed reflect.Value represents and outputes the result to Writer w. 
+// +// It handles panics in any called methods by catching and displaying the error +// as the formatted value. +func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { + // We need an interface to check if the type implements the error or + // Stringer interface. However, the reflect package won't give us an + // interface on certain things like unexported struct fields in order + // to enforce visibility rules. We use unsafe, when it's available, + // to bypass these restrictions since this package does not mutate the + // values. + if !v.CanInterface() { + if UnsafeDisabled { + return false + } + + v = unsafeReflectValue(v) + } + + // Choose whether or not to do error and Stringer interface lookups against + // the base type or a pointer to the base type depending on settings. + // Technically calling one of these methods with a pointer receiver can + // mutate the value, however, types which choose to satisify an error or + // Stringer interface with a pointer receiver should not be mutating their + // state inside these interface methods. + if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { + v = unsafeReflectValue(v) + } + if v.CanAddr() { + v = v.Addr() + } + + // Is it an error or Stringer? + switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. 
+ base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. 
+ l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 0000000..2e3d22f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. 
+ DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. 
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 0000000..aacaac6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. + +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) 
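As a concrete illustration of the Dump-style quick-start calls above, here is a minimal, self-contained sketch. The Options struct, its field values, and the chosen ConfigState settings are invented purely for the example; only spew.Dump, spew.Sdump, and the ConfigState fields declared in the vendored config.go above are taken from the package.

    package main

    import (
        "fmt"

        "github.com/davecgh/go-spew/spew"
    )

    // Options is a made-up struct used only to demonstrate the output styles.
    type Options struct {
        Name    string
        Retries int
        Tags    map[string]bool
    }

    func main() {
        o := &Options{Name: "relay", Retries: 3, Tags: map[string]bool{"nostr": true}}

        // Dump-style output: newlines, indentation, types, and pointer addresses.
        spew.Dump(o)

        // The same output captured as a string.
        fmt.Print(spew.Sdump(o))

        // A ConfigState tuned for deterministic, diff-friendly output,
        // using fields declared in config.go above.
        cs := spew.ConfigState{
            Indent:                  "\t",
            SortKeys:                true,
            DisablePointerAddresses: true,
            DisableCapacities:       true,
        }
        cs.Dump(o)
    }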
+ +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. + + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. 
+ + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 0000000..f78d89f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. 
+ if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. 
+ if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. 
See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000..b04edb7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. +func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. + if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. 
We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. 
+ if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. +*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 0000000..32c0e33 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/fiatjaf/khatru/nip86.go b/vendor/github.com/fiatjaf/khatru/nip86.go index 35523bb..73b500e 100644 --- a/vendor/github.com/fiatjaf/khatru/nip86.go +++ b/vendor/github.com/fiatjaf/khatru/nip86.go @@ -65,16 +65,22 @@ func (rl *Relay) HandleNIP86(w http.ResponseWriter, r *http.Request) { resp.Error = "missing auth" goto respond } - if evtj, err := base64.StdEncoding.DecodeString(spl[1]); err != nil { + + evtj, err := base64.StdEncoding.DecodeString(spl[1]) + if err != nil { resp.Error = "invalid base64 auth" goto respond - } else if err := json.Unmarshal(evtj, &evt); err != nil { + } + if err := json.Unmarshal(evtj, &evt); err != nil { resp.Error = "invalid auth event json" goto respond - } else if ok, _ := evt.CheckSignature(); !ok { + } + if ok, _ := evt.CheckSignature(); !ok { resp.Error = "invalid auth event" goto respond - } else if uTag := evt.Tags.GetFirst([]string{"u", ""}); uTag == nil || getServiceBaseURL(r) != (*uTag)[1] { + } + + if uTag := evt.Tags.GetFirst([]string{"u", ""}); uTag == nil || rl.ServiceURL != (*uTag)[1] { resp.Error = "invalid 'u' tag" goto respond } else if pht := evt.Tags.GetFirst([]string{"payload", hex.EncodeToString(payloadHash[:])}); pht == nil { diff --git a/vendor/github.com/greatroar/blobloom/.gitattributes b/vendor/github.com/greatroar/blobloom/.gitattributes deleted file mode 100644 index 5ce4535..0000000 --- a/vendor/github.com/greatroar/blobloom/.gitattributes +++ /dev/null @@ -1,2 +0,0 @@ -# Work around https://github.com/golang/go/issues/52268. -**/testdata/fuzz/*/* eol=lf diff --git a/vendor/github.com/greatroar/blobloom/.golangci.yml b/vendor/github.com/greatroar/blobloom/.golangci.yml deleted file mode 100644 index cf8c53f..0000000 --- a/vendor/github.com/greatroar/blobloom/.golangci.yml +++ /dev/null @@ -1,25 +0,0 @@ -# Configuration for golangci-lint. 
- -linters: - disable: - - asciicheck - enable: - - gocognit - - gocyclo - - godot - - gofumpt - - lll - - misspell - - nakedret - - thelper - -issues: - exclude-rules: - - path: _test\.go - linters: - errcheck - -linters-settings: - govet: - enable: - - atomicalign diff --git a/vendor/github.com/greatroar/blobloom/LICENSE b/vendor/github.com/greatroar/blobloom/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/github.com/greatroar/blobloom/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/greatroar/blobloom/README.md b/vendor/github.com/greatroar/blobloom/README.md deleted file mode 100644 index d615f2b..0000000 --- a/vendor/github.com/greatroar/blobloom/README.md +++ /dev/null @@ -1,86 +0,0 @@ -Blobloom -======== - -A Bloom filter package for Go (golang) with no compile-time dependencies. - -This package implements a version of Bloom filters called [blocked Bloom filters]( -https://algo2.iti.kit.edu/documents/cacheefficientbloomfilters-jea.pdf), -which get a speed boost from using the CPU cache more efficiently -than regular Bloom filters. - -Unlike most Bloom filter packages for Go, -this one doesn't run a hash function for you. -That's a benefit if you need a custom hash -or you want pick the fastest one for an application. - -Usage ------ - -To construct a Bloom filter, you need to know how many keys you want to store -and what rate of false positives you find acceptable. - - f := blobloom.NewOptimized(blobloom.Config{ - Capacity: nkeys, // Expected number of keys. - FPRate: 1e-4, // Accept one false positive per 10,000 lookups. - }) - -To add a key: - - // import "github.com/cespare/xxhash/v2" - f.Add(xxhash.Sum64(key)) - -To test for the presence of a key in the filter: - - if f.Has(xxhash.Sum64(key)) { - // Key is probably in f. - } else { - // Key is certainly not in f. - } - -The false positive rate is defined as usual: -if you look up 10,000 random keys in a Bloom filter filled to capacity, -an expected one of those is a false positive for FPRate 1e-4. - -See the examples/ directory and the -[package documentation](https://pkg.go.dev/github.com/greatroar/blobloom) -for further usage information and examples. - -Hash functions --------------- - -Blobloom does not provide hash functions. Instead, it requires client code to -represent each key as a single 64-bit hash value, leaving it to the user to -pick the right hash function for a particular problem. Here are some general -suggestions: - -* If you use Bloom filters to speed up access to a key-value store, you might -want to look at [xxh3](https://github.com/zeebo/xxh3) or [xxhash]( -https://github.com/cespare/xxhash). -* If your keys are cryptographic hashes, consider using the first 8 bytes of those hashes. 
-* If you use Bloom filters to make probabilistic decisions, a randomized hash -function such as [maphash](https://golang.org/pkg/hash/maphash) should prevent -the same false positives occurring every time. - -When evaluating a hash function, or designing a custom one, -make sure it is a 64-bit hash that properly mixes its input bits. -Casting a 32-bit hash to uint64 gives suboptimal results. -So does passing integer keys in without running them through a mixing function. - - - -License -------- - -Copyright © 2020-2023 the Blobloom authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. diff --git a/vendor/github.com/greatroar/blobloom/bloomfilter.go b/vendor/github.com/greatroar/blobloom/bloomfilter.go deleted file mode 100644 index 78f09de..0000000 --- a/vendor/github.com/greatroar/blobloom/bloomfilter.go +++ /dev/null @@ -1,279 +0,0 @@ -// Copyright 2020-2022 the Blobloom authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package blobloom implements blocked Bloom filters. -// -// Blocked Bloom filters are an approximate set data structure: if a key has -// been added to a filter, a lookup of that key returns true, but if the key -// has not been added, there is a non-zero probability that the lookup still -// returns true (a false positive). False negatives are impossible: if the -// lookup for a key returns false, that key has not been added. -// -// In this package, keys are represented exclusively as hashes. Client code -// is responsible for supplying a 64-bit hash value. -// -// Compared to standard Bloom filters, blocked Bloom filters use the CPU -// cache more efficiently. A blocked Bloom filter is an array of ordinary -// Bloom filters of fixed size BlockBits (the blocks). The lower half of the -// hash selects the block to use. -// -// To achieve the same false positive rate (FPR) as a standard Bloom filter, -// a blocked Bloom filter requires more memory. For an FPR of at most 2e-6 -// (two in a million), it uses ~20% more memory. At 1e-10, the space required -// is double that of standard Bloom filter. -// -// For more details, see the 2010 paper by Putze, Sanders and Singler, -// https://algo2.iti.kit.edu/documents/cacheefficientbloomfilters-jea.pdf. -package blobloom - -import "math" - -// BlockBits is the number of bits per block and the minimum number of bits -// in a Filter. -// -// The value of this constant is chosen to match the L1 cache line size -// of popular architectures (386, amd64, arm64). 
-const BlockBits = 512 - -// MaxBits is the maximum number of bits supported by a Filter. -const MaxBits = BlockBits << 32 // 256GiB. - -// A Filter is a blocked Bloom filter. -type Filter struct { - b []block // Shards. - k int // Number of hash functions required. -} - -// New constructs a Bloom filter with given numbers of bits and hash functions. -// -// The number of bits should be at least BlockBits; smaller values are silently -// increased. -// -// The number of hashes reflects the number of hashes synthesized from the -// single hash passed in by the client. It is silently increased to two if -// a lower value is given. -func New(nbits uint64, nhashes int) *Filter { - nbits, nhashes = fixBitsAndHashes(nbits, nhashes) - - return &Filter{ - b: make([]block, nbits/BlockBits), - k: nhashes, - } -} - -func fixBitsAndHashes(nbits uint64, nhashes int) (uint64, int) { - if nbits < 1 { - nbits = BlockBits - } - if nhashes < 2 { - nhashes = 2 - } - if nbits > MaxBits { - panic("nbits exceeds MaxBits") - } - - // Round nbits up to a multiple of BlockBits. - if nbits%BlockBits != 0 { - nbits += BlockBits - nbits%BlockBits - } - - return nbits, nhashes -} - -// Add inserts a key with hash value h into f. -func (f *Filter) Add(h uint64) { - h1, h2 := uint32(h>>32), uint32(h) - b := getblock(f.b, h2) - - for i := 1; i < f.k; i++ { - h1, h2 = doublehash(h1, h2, i) - b.setbit(h1) - } -} - -// log(1 - 1/BlockBits) computed with 128 bits precision. -// Note that this is extremely close to -1/BlockBits, -// which is what Wikipedia would have us use: -// https://en.wikipedia.org/wiki/Bloom_filter#Approximating_the_number_of_items_in_a_Bloom_filter. -const log1minus1divBlockbits = -0.0019550348358033505576274922418668121377 - -// Cardinality estimates the number of distinct keys added to f. -// -// The estimate is most reliable when f is filled to roughly its capacity. -// It gets worse as f gets more densely filled. When one of the blocks is -// entirely filled, the estimate becomes +Inf. -// -// The return value is the maximum likelihood estimate of Papapetrou, Siberski -// and Nejdl, summed over the blocks -// (https://www.win.tue.nl/~opapapetrou/papers/Bloomfilters-DAPD.pdf). -func (f *Filter) Cardinality() float64 { - return cardinality(f.k, f.b, onescount) -} - -func cardinality(nhashes int, b []block, onescount func(*block) int) float64 { - k := float64(nhashes - 1) - - // The probability of some bit not being set in a single insertion is - // p0 = (1-1/BlockBits)^k. - // - // logProb0Inv = 1 / log(p0) = 1 / (k*log(1-1/BlockBits)). - logProb0Inv := 1 / (k * log1minus1divBlockbits) - - var n float64 - for i := range b { - ones := onescount(&b[i]) - if ones == 0 { - continue - } - n += math.Log1p(-float64(ones) / BlockBits) - } - return n * logProb0Inv -} - -// Clear resets f to its empty state. -func (f *Filter) Clear() { - for i := 0; i < len(f.b); i++ { - f.b[i] = block{} - } -} - -// Empty reports whether f contains no keys. -func (f *Filter) Empty() bool { - for i := 0; i < len(f.b); i++ { - if f.b[i] != (block{}) { - return false - } - } - return true -} - -// Equals returns true if f and g contain the same keys (in terms of Has) -// when used with the same hash function. -func (f *Filter) Equals(g *Filter) bool { - if g.k != f.k || len(g.b) != len(f.b) { - return false - } - for i := range g.b { - if f.b[i] != g.b[i] { - return false - } - } - return true -} - -// Fill sets f to a completely full filter. -// After Fill, Has returns true for any key.
-func (f *Filter) Fill() { - for i := 0; i < len(f.b); i++ { - for j := 0; j < blockWords; j++ { - f.b[i][j] = ^uint32(0) - } - } -} - -// Has reports whether a key with hash value h has been added. -// It may return a false positive. -func (f *Filter) Has(h uint64) bool { - h1, h2 := uint32(h>>32), uint32(h) - b := getblock(f.b, h2) - - for i := 1; i < f.k; i++ { - h1, h2 = doublehash(h1, h2, i) - if !b.getbit(h1) { - return false - } - } - return true -} - -// doublehash generates the hash values to use in iteration i of -// enhanced double hashing from the values h1, h2 of the previous iteration. -// See https://www.ccs.neu.edu/home/pete/pub/bloom-filters-verification.pdf. -func doublehash(h1, h2 uint32, i int) (uint32, uint32) { - h1 = h1 + h2 - h2 = h2 + uint32(i) - return h1, h2 -} - -// NumBits returns the number of bits of f. -func (f *Filter) NumBits() uint64 { - return BlockBits * uint64(len(f.b)) -} - -func checkBinop(f, g *Filter) { - if len(f.b) != len(g.b) { - panic("Bloom filters do not have the same number of bits") - } - if f.k != g.k { - panic("Bloom filters do not have the same number of hash functions") - } -} - -// Intersect sets f to the intersection of f and g. -// -// Intersect panics when f and g do not have the same number of bits and -// hash functions. Both Filters must be using the same hash function(s), -// but Intersect cannot check this. -// -// Since Bloom filters may return false positives, Has may return true for -// a key that was not in both f and g. -// -// After Intersect, the estimates from f.Cardinality and f.FPRate should be -// considered unreliable. -func (f *Filter) Intersect(g *Filter) { - checkBinop(f, g) - f.intersect(g) -} - -// Union sets f to the union of f and g. -// -// Union panics when f and g do not have the same number of bits and -// hash functions. Both Filters must be using the same hash function(s), -// but Union cannot check this. -func (f *Filter) Union(g *Filter) { - checkBinop(f, g) - f.union(g) -} - -const ( - wordSize = 32 - blockWords = BlockBits / wordSize -) - -// A block is a fixed-size Bloom filter, used as a shard of a Filter. -type block [blockWords]uint32 - -func getblock(b []block, h2 uint32) *block { - i := reducerange(h2, uint32(len(b))) - return &b[i] -} - -// reducerange maps i to an integer in the range [0,n). -// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ -func reducerange(i, n uint32) uint32 { - return uint32((uint64(i) * uint64(n)) >> 32) -} - -// getbit reports whether bit (i modulo BlockBits) is set. -func (b *block) getbit(i uint32) bool { - bit := uint32(1) << (i % wordSize) - x := (*b)[(i/wordSize)%blockWords] & bit - return x != 0 -} - -// setbit sets bit (i modulo BlockBits) of b. -func (b *block) setbit(i uint32) { - bit := uint32(1) << (i % wordSize) - (*b)[(i/wordSize)%blockWords] |= bit -} diff --git a/vendor/github.com/greatroar/blobloom/io.go b/vendor/github.com/greatroar/blobloom/io.go deleted file mode 100644 index df104d9..0000000 --- a/vendor/github.com/greatroar/blobloom/io.go +++ /dev/null @@ -1,246 +0,0 @@ -// Copyright 2023 the Blobloom authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package blobloom - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "strings" - "sync/atomic" -) - -const maxCommentLen = 44 - -// Dump writes f to w, with an optional comment string, in the binary format -// that a Loader accepts. It returns the number of bytes written to w. -// -// The comment may contain arbitrary data, within the limits laid out by the -// format description. It can be used to record the hash function to be used -// with a Filter. -func Dump(w io.Writer, f *Filter, comment string) (int64, error) { - return dump(w, f.b, f.k, comment) -} - -// DumpSync is like Dump, but for SyncFilters. -// -// If other goroutines are simultaneously modifying f, -// their modifications may not be reflected in the dump. -// Separate synchronization is required to prevent this. -// -// The format produced is the same as Dump's. The fact that -// the argument is a SyncFilter is not encoded in the dump. -func DumpSync(w io.Writer, f *SyncFilter, comment string) (n int64, err error) { - return dump(w, f.b, f.k, comment) -} - -func dump(w io.Writer, b []block, nhashes int, comment string) (n int64, err error) { - switch { - case len(b) == 0 || nhashes == 0: - err = errors.New("blobloom: won't dump uninitialized Filter") - case len(comment) > maxCommentLen: - err = fmt.Errorf("blobloom: comment of length %d too long", len(comment)) - case strings.IndexByte(comment, 0) != -1: - err = fmt.Errorf("blobloom: comment %q contains zero byte", comment) - } - if err != nil { - return 0, err - } - - var buf [64]byte - copy(buf[:8], "blobloom") - // As documented in the comment for Loader, we store one less than the - // number of blocks. This way, we can use the otherwise invalid value 0 - // and store 2³² blocks instead of at most 2³²-1. - binary.LittleEndian.PutUint32(buf[12:], uint32(len(b)-1)) - binary.LittleEndian.PutUint32(buf[16:], uint32(nhashes)) - copy(buf[20:], comment) - - k, err := w.Write(buf[:]) - n = int64(k) - if err != nil { - return n, err - } - - for i := range b { - for j := range b[i] { - x := atomic.LoadUint32(&b[i][j]) - binary.LittleEndian.PutUint32(buf[4*j:], x) - } - k, err = w.Write(buf[:]) - n += int64(k) - if err != nil { - break - } - } - - return n, err -} - -// A Loader reads a Filter or SyncFilter from an io.Reader. -// -// A Loader accepts the binary format produced by Dump. The format starts -// with a 64-byte header: -// - the string "blobloom", in ASCII; -// - a four-byte version number, which must be zero; -// - the number of Bloom filter blocks, minus one, as a 32-bit integer; -// - the number of hashes, as a 32-bit integer; -// - a comment of at most 44 non-zero bytes, padded to 44 bytes with zeros. -// -// After the header come the 512-bit blocks, divided into sixteen 32-bit limbs. -// All integers are little-endian. -type Loader struct { - buf [64]byte - r io.Reader - err error - - Comment string // Comment field. Filled in by NewLoader. - nblocks uint64 - nhashes int -} - -// NewLoader parses the format header from r and returns a Loader -// that can be used to load a Filter from it.
-func NewLoader(r io.Reader) (*Loader, error) { - l := &Loader{r: r} - - err := l.fillbuf() - if err != nil { - return nil, err - } - - version := binary.LittleEndian.Uint32(l.buf[8:]) - // See comment in dump for the +1. - l.nblocks = 1 + uint64(binary.LittleEndian.Uint32(l.buf[12:])) - l.nhashes = int(binary.LittleEndian.Uint32(l.buf[16:])) - comment := l.buf[20:] - - switch { - case string(l.buf[:8]) != "blobloom": - err = errors.New("blobloom: not a Bloom filter dump") - case version != 0: - err = errors.New("blobloom: unsupported dump version") - case l.nhashes == 0: - err = errors.New("blobloom: zero hashes in Bloom filter dump") - } - if err == nil { - comment, err = checkComment(comment) - l.Comment = string(comment) - } - - if err != nil { - l = nil - } - return l, err -} - -// Load sets f to the union of f and the Loader's filter, then returns f. -// If f is nil, a new Filter of the appropriate size is constructed. -// -// If f is not nil and an error occurs while reading from the Loader, -// f may end up in an inconsistent state. -func (l *Loader) Load(f *Filter) (*Filter, error) { - if f == nil { - nbits := BlockBits * l.nblocks - if nbits > MaxBits { - return nil, fmt.Errorf("blobloom: %d blocks is too large", l.nblocks) - } - f = New(nbits, int(l.nhashes)) - } else if err := l.checkBitsAndHashes(len(f.b), f.k); err != nil { - return nil, err - } - - for i := range f.b { - if err := l.fillbuf(); err != nil { - return nil, err - } - - for j := range f.b[i] { - f.b[i][j] |= binary.LittleEndian.Uint32(l.buf[4*j:]) - } - } - - return f, nil -} - -// Load sets f to the union of f and the Loader's filter, then returns f. -// If f is nil, a new SyncFilter of the appropriate size is constructed. -// Else, LoadSync may run concurrently with other modifications to f. -// -// If f is not nil and an error occurs while reading from the Loader, -// f may end up in an inconsistent state. 
-func (l *Loader) LoadSync(f *SyncFilter) (*SyncFilter, error) { - if f == nil { - nbits := BlockBits * l.nblocks - if nbits > MaxBits { - return nil, fmt.Errorf("blobloom: %d blocks is too large", l.nblocks) - } - f = NewSync(nbits, int(l.nhashes)) - } else if err := l.checkBitsAndHashes(len(f.b), f.k); err != nil { - return nil, err - } - - for i := range f.b { - if err := l.fillbuf(); err != nil { - return nil, err - } - - for j := range f.b[i] { - p := &f.b[i][j] - x := binary.LittleEndian.Uint32(l.buf[4*j:]) - - for { - old := atomic.LoadUint32(p) - if atomic.CompareAndSwapUint32(p, old, old|x) { - break - } - } - } - } - - return f, nil -} - -func (l *Loader) checkBitsAndHashes(nblocks, nhashes int) error { - switch { - case nblocks != int(l.nblocks): - return fmt.Errorf("blobloom: Filter has %d blocks, but dump has %d", nblocks, l.nblocks) - case nhashes != l.nhashes: - return fmt.Errorf("blobloom: Filter has %d hashes, but dump has %d", nhashes, l.nhashes) - } - return nil -} - -func (l *Loader) fillbuf() error { - _, err := io.ReadFull(l.r, l.buf[:]) - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err -} - -func checkComment(p []byte) ([]byte, error) { - eos := bytes.IndexByte(p, 0) - if eos != -1 { - tail := p[eos+1:] - if !bytes.Equal(tail, make([]byte, len(tail))) { - return nil, fmt.Errorf("blobloom: comment block %q contains zero byte", p) - } - p = p[:eos] - } - return p, nil -} diff --git a/vendor/github.com/greatroar/blobloom/optimize.go b/vendor/github.com/greatroar/blobloom/optimize.go deleted file mode 100644 index 0497e77..0000000 --- a/vendor/github.com/greatroar/blobloom/optimize.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2020 the Blobloom authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package blobloom - -import "math" - -// A Config holds parameters for Optimize or NewOptimized. -type Config struct { - // Trigger the "contains filtered or unexported fields" message for - // forward compatibility and force the caller to use named fields. - _ struct{} - - // Capacity is the expected number of distinct keys to be added. - // More keys can always be added, but the false positive rate can be - // expected to rise above FPRate if their number exceeds the Capacity. - Capacity uint64 - - // Desired lower bound on the false positive rate when the Bloom filter - // has been filled to its capacity. FPRate must be between zero - // (exclusive) and one (inclusive). - FPRate float64 - - // Maximum size of the Bloom filter in bits. Zero means the global - // MaxBits constant. A value less than BlockBits means BlockBits. - MaxBits uint64 -} - -// NewOptimized is shorthand for New(Optimize(config)). -func NewOptimized(config Config) *Filter { - return New(Optimize(config)) -} - -// NewSyncOptimized is shorthand for NewSync(Optimize(config)).
-func NewSyncOptimized(config Config) *SyncFilter { - return NewSync(Optimize(config)) -} - -// Optimize returns numbers of bits and hash functions that achieve the -// desired false positive rate described by config. -// -// Optimize panics when config.FPRate is invalid. -// -// The estimated number of bits is imprecise for false positive rates below -// ca. 1e-15. -func Optimize(config Config) (nbits uint64, nhashes int) { - n := float64(config.Capacity) - p := config.FPRate - - if p <= 0 || p > 1 { - panic("false positive rate for a Bloom filter must be > 0, <= 1") - } - if n == 0 { - // Assume the client wants to add at least one key; log2(0) = -inf. - n = 1 - } - - // The optimal nbits/n is c = -log2(p) / ln(2) for a vanilla Bloom filter. - c := math.Ceil(-math.Log2(p) / math.Ln2) - if c < float64(len(correctC)) { - c = float64(correctC[int(c)]) - } else { - // We can't achieve the desired FPR. Just triple the number of bits. - c *= 3 - } - nbits = uint64(c * n) - - // Round up to a multiple of BlockBits. - if nbits%BlockBits != 0 { - nbits += BlockBits - nbits%BlockBits - } - - var maxbits uint64 = MaxBits - if config.MaxBits != 0 && config.MaxBits < maxbits { - maxbits = config.MaxBits - if maxbits < BlockBits { - maxbits = BlockBits - } - } - if nbits > maxbits { - nbits = maxbits - // Round down to a multiple of BlockBits. - nbits -= nbits % BlockBits - } - - // The corresponding optimal number of hash functions is k = c * log(2). - // Try rounding up and down to see which rounding is better. - c = float64(nbits) / n - k := c * math.Ln2 - if k < 1 { - nhashes = 1 - return nbits, nhashes - } - - floorK, ceilK := math.Floor(k), math.Ceil(k) - if ceilK == floorK { - return nbits, int(ceilK) - } - - fprCeil, _ := fpRate(c, math.Ceil(k)) - fprFloor, _ := fpRate(c, math.Floor(k)) - if fprFloor < fprCeil { - k = floorK - } else { - k = ceilK - } - - return nbits, int(k) -} - -// correctC maps c = m/n for a vanilla Bloom filter to the c' for a -// blocked Bloom filter. -// -// This is Putze et al.'s Table I, extended down to zero. -// For c > 34, the values become huge and are hard to compute. -var correctC = []byte{ - 1, 1, 2, 4, 5, - 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 20, 21, 23, - 25, 26, 28, 30, 32, 35, 38, 40, 44, 48, 51, 58, 64, 74, 90, -} - -// FPRate computes an estimate of the false positive rate of a Bloom filter -// after nkeys distinct keys have been added. -func FPRate(nkeys, nbits uint64, nhashes int) float64 { - if nkeys == 0 { - return 0 - } - p, _ := fpRate(float64(nbits)/float64(nkeys), float64(nhashes)) - return p -} - -func fpRate(c, k float64) (p float64, iter int) { - switch { - case c == 0: - panic("0 bits per key is too few") - case k == 0: - panic("0 hashes is too few") - } - - // Putze et al.'s Equation (3). - // - // The Poisson distribution has a single spike around its mean - // BlockBits/c that gets slimmer and further away from zero as c tends - // to zero (the Bloom filter gets more filled). We start at the mean, - // then add terms left and right of it until their relative contribution - // drops below ε. - const ε = 1e-9 - mean := BlockBits / c - - // Ceil to make sure we start at one, not zero.
- i := math.Ceil(mean) - p = math.Exp(logPoisson(mean, i) + logFprBlock(BlockBits/i, k)) - - for j := i - 1; j > 0; j-- { - add := math.Exp(logPoisson(mean, j) + logFprBlock(BlockBits/j, k)) - p += add - iter++ - if add/p < ε { - break - } - } - - for j := i + 1; ; j++ { - add := math.Exp(logPoisson(mean, j) + logFprBlock(BlockBits/j, k)) - p += add - iter++ - if add/p < ε { - break - } - } - - return p, iter -} - -// FPRate computes an estimate of f's false positive rate after nkeys distinct -// keys have been added. -func (f *Filter) FPRate(nkeys uint64) float64 { - return FPRate(nkeys, f.NumBits(), f.k) -} - -// Log of the FPR of a single block, FPR = (1 - exp(-k/c))^k. -func logFprBlock(c, k float64) float64 { - return k * math.Log1p(-math.Exp(-k/c)) -} - -// Log of the Poisson distribution's pmf. -func logPoisson(λ, k float64) float64 { - lg, _ := math.Lgamma(k + 1) - return k*math.Log(λ) - λ - lg -} diff --git a/vendor/github.com/greatroar/blobloom/setop_64bit.go b/vendor/github.com/greatroar/blobloom/setop_64bit.go deleted file mode 100644 index b588038..0000000 --- a/vendor/github.com/greatroar/blobloom/setop_64bit.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2020-2022 the Blobloom authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build (amd64 || arm64) && !nounsafe -// +build amd64 arm64 -// +build !nounsafe - -package blobloom - -import ( - "math/bits" - "sync/atomic" - "unsafe" -) - -// Block reinterpreted as array of uint64. 
-type block64 [BlockBits / 64]uint64 - -func (f *Filter) intersect(g *Filter) { - a, b := f.b, g.b - for len(a) >= 2 && len(b) >= 2 { - p := (*block64)(unsafe.Pointer(&a[0])) - q := (*block64)(unsafe.Pointer(&b[0])) - - p[0] &= q[0] - p[1] &= q[1] - p[2] &= q[2] - p[3] &= q[3] - p[4] &= q[4] - p[5] &= q[5] - p[6] &= q[6] - p[7] &= q[7] - - p = (*block64)(unsafe.Pointer(&a[1])) - q = (*block64)(unsafe.Pointer(&b[1])) - - p[0] &= q[0] - p[1] &= q[1] - p[2] &= q[2] - p[3] &= q[3] - p[4] &= q[4] - p[5] &= q[5] - p[6] &= q[6] - p[7] &= q[7] - - a, b = a[2:], b[2:] - } - - if len(a) > 0 && len(b) > 0 { - p := (*block64)(unsafe.Pointer(&a[0])) - q := (*block64)(unsafe.Pointer(&b[0])) - - p[0] &= q[0] - p[1] &= q[1] - p[2] &= q[2] - p[3] &= q[3] - p[4] &= q[4] - p[5] &= q[5] - p[6] &= q[6] - p[7] &= q[7] - } -} - -func (f *Filter) union(g *Filter) { - a, b := f.b, g.b - for len(a) >= 2 && len(b) >= 2 { - p := (*block64)(unsafe.Pointer(&a[0])) - q := (*block64)(unsafe.Pointer(&b[0])) - - p[0] |= q[0] - p[1] |= q[1] - p[2] |= q[2] - p[3] |= q[3] - p[4] |= q[4] - p[5] |= q[5] - p[6] |= q[6] - p[7] |= q[7] - - p = (*block64)(unsafe.Pointer(&a[1])) - q = (*block64)(unsafe.Pointer(&b[1])) - - p[0] |= q[0] - p[1] |= q[1] - p[2] |= q[2] - p[3] |= q[3] - p[4] |= q[4] - p[5] |= q[5] - p[6] |= q[6] - p[7] |= q[7] - - a, b = a[2:], b[2:] - } - - if len(a) > 0 && len(b) > 0 { - p := (*block64)(unsafe.Pointer(&a[0])) - q := (*block64)(unsafe.Pointer(&b[0])) - - p[0] |= q[0] - p[1] |= q[1] - p[2] |= q[2] - p[3] |= q[3] - p[4] |= q[4] - p[5] |= q[5] - p[6] |= q[6] - p[7] |= q[7] - } -} - -func onescount(b *block) (n int) { - p := (*block64)(unsafe.Pointer(&b[0])) - - n += bits.OnesCount64(p[0]) - n += bits.OnesCount64(p[1]) - n += bits.OnesCount64(p[2]) - n += bits.OnesCount64(p[3]) - n += bits.OnesCount64(p[4]) - n += bits.OnesCount64(p[5]) - n += bits.OnesCount64(p[6]) - n += bits.OnesCount64(p[7]) - - return n -} - -func onescountAtomic(b *block) (n int) { - p := (*block64)(unsafe.Pointer(&b[0])) - - n += bits.OnesCount64(atomic.LoadUint64(&p[0])) - n += bits.OnesCount64(atomic.LoadUint64(&p[1])) - n += bits.OnesCount64(atomic.LoadUint64(&p[2])) - n += bits.OnesCount64(atomic.LoadUint64(&p[3])) - n += bits.OnesCount64(atomic.LoadUint64(&p[4])) - n += bits.OnesCount64(atomic.LoadUint64(&p[5])) - n += bits.OnesCount64(atomic.LoadUint64(&p[6])) - n += bits.OnesCount64(atomic.LoadUint64(&p[7])) - - return n -} diff --git a/vendor/github.com/greatroar/blobloom/setop_other.go b/vendor/github.com/greatroar/blobloom/setop_other.go deleted file mode 100644 index 53749a2..0000000 --- a/vendor/github.com/greatroar/blobloom/setop_other.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2020-2022 the Blobloom authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -//go:build (!amd64 && !arm64) || nounsafe -// +build !amd64,!arm64 nounsafe - -package blobloom - -import ( - "math/bits" - "sync/atomic" -) - -func (f *Filter) intersect(g *Filter) { - for i := range f.b { - f.b[i].intersect(&g.b[i]) - } -} - -func (f *Filter) union(g *Filter) { - for i := range f.b { - f.b[i].union(&g.b[i]) - } -} - -func (b *block) intersect(c *block) { - b[0] &= c[0] - b[1] &= c[1] - b[2] &= c[2] - b[3] &= c[3] - b[4] &= c[4] - b[5] &= c[5] - b[6] &= c[6] - b[7] &= c[7] - b[8] &= c[8] - b[9] &= c[9] - b[10] &= c[10] - b[11] &= c[11] - b[12] &= c[12] - b[13] &= c[13] - b[14] &= c[14] - b[15] &= c[15] -} - -func (b *block) union(c *block) { - b[0] |= c[0] - b[1] |= c[1] - b[2] |= c[2] - b[3] |= c[3] - b[4] |= c[4] - b[5] |= c[5] - b[6] |= c[6] - b[7] |= c[7] - b[8] |= c[8] - b[9] |= c[9] - b[10] |= c[10] - b[11] |= c[11] - b[12] |= c[12] - b[13] |= c[13] - b[14] |= c[14] - b[15] |= c[15] -} - -func onescount(b *block) (n int) { - n += bits.OnesCount32(b[0]) - n += bits.OnesCount32(b[1]) - n += bits.OnesCount32(b[2]) - n += bits.OnesCount32(b[3]) - n += bits.OnesCount32(b[4]) - n += bits.OnesCount32(b[5]) - n += bits.OnesCount32(b[6]) - n += bits.OnesCount32(b[7]) - n += bits.OnesCount32(b[8]) - n += bits.OnesCount32(b[9]) - n += bits.OnesCount32(b[10]) - n += bits.OnesCount32(b[11]) - n += bits.OnesCount32(b[12]) - n += bits.OnesCount32(b[13]) - n += bits.OnesCount32(b[14]) - n += bits.OnesCount32(b[15]) - - return n -} - -func onescountAtomic(b *block) (n int) { - n += bits.OnesCount32(atomic.LoadUint32(&b[0])) - n += bits.OnesCount32(atomic.LoadUint32(&b[1])) - n += bits.OnesCount32(atomic.LoadUint32(&b[2])) - n += bits.OnesCount32(atomic.LoadUint32(&b[3])) - n += bits.OnesCount32(atomic.LoadUint32(&b[4])) - n += bits.OnesCount32(atomic.LoadUint32(&b[5])) - n += bits.OnesCount32(atomic.LoadUint32(&b[6])) - n += bits.OnesCount32(atomic.LoadUint32(&b[7])) - n += bits.OnesCount32(atomic.LoadUint32(&b[8])) - n += bits.OnesCount32(atomic.LoadUint32(&b[9])) - n += bits.OnesCount32(atomic.LoadUint32(&b[10])) - n += bits.OnesCount32(atomic.LoadUint32(&b[11])) - n += bits.OnesCount32(atomic.LoadUint32(&b[12])) - n += bits.OnesCount32(atomic.LoadUint32(&b[13])) - n += bits.OnesCount32(atomic.LoadUint32(&b[14])) - n += bits.OnesCount32(atomic.LoadUint32(&b[15])) - - return n -} diff --git a/vendor/github.com/greatroar/blobloom/sync.go b/vendor/github.com/greatroar/blobloom/sync.go deleted file mode 100644 index 22503ba..0000000 --- a/vendor/github.com/greatroar/blobloom/sync.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2021-2022 the Blobloom authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package blobloom - -import "sync/atomic" - -// A SyncFilter is a Bloom filter that can be accessed and updated -// by multiple goroutines concurrently. 
-// -// A SyncFilter mostly behaves as a regular filter protected by a lock, -// -// type SyncFilter struct { -// Filter -// lock sync.Mutex -// } -// -// with each method taking and releasing the lock, -// but is implemented much more efficiently. -// See the method descriptions for exceptions to the previous rule. -type SyncFilter struct { - b []block // Shards. - k int // Number of hash functions required. -} - -// NewSync constructs a Bloom filter with given numbers of bits and hash functions. -// -// The number of bits should be at least BlockBits; smaller values are silently -// increased. -// -// The number of hashes reflects the number of hashes synthesized from the -// single hash passed in by the client. It is silently increased to two if -// a lower value is given. -func NewSync(nbits uint64, nhashes int) *SyncFilter { - nbits, nhashes = fixBitsAndHashes(nbits, nhashes) - - return &SyncFilter{ - b: make([]block, nbits/BlockBits), - k: nhashes, - } - -} - -// Add insert a key with hash value h into f. -func (f *SyncFilter) Add(h uint64) { - h1, h2 := uint32(h>>32), uint32(h) - b := getblock(f.b, h2) - - for i := 1; i < f.k; i++ { - h1, h2 = doublehash(h1, h2, i) - setbitAtomic(b, h1) - } -} - -// Cardinality estimates the number of distinct keys added to f. -// -// The estimate is most reliable when f is filled to roughly its capacity. -// It gets worse as f gets more densely filled. When one of the blocks is -// entirely filled, the estimate becomes +Inf. -// -// The return value is the maximum likelihood estimate of Papapetrou, Siberski -// and Nejdl, summed over the blocks -// (https://www.win.tue.nl/~opapapetrou/papers/Bloomfilters-DAPD.pdf). -// -// If other goroutines are concurrently adding keys, -// the estimate may lie in between what would have been returned -// before the concurrent updates started and what is returned -// after the updates complete. -func (f *SyncFilter) Cardinality() float64 { - return cardinality(f.k, f.b, onescountAtomic) -} - -// Empty reports whether f contains no keys. -// -// If other goroutines are concurrently adding keys, -// Empty may return a false positive. -func (f *SyncFilter) Empty() bool { - for i := 0; i < len(f.b); i++ { - for j := 0; j < blockWords; j++ { - if atomic.LoadUint32(&f.b[i][j]) != 0 { - return false - } - } - } - return true -} - -// Fill sets f to a completely full filter. -// After Fill, Has returns true for any key. -func (f *SyncFilter) Fill() { - for i := 0; i < len(f.b); i++ { - for j := 0; j < blockWords; j++ { - atomic.StoreUint32(&f.b[i][j], ^uint32(0)) - } - } -} - -// Has reports whether a key with hash value h has been added. -// It may return a false positive. -func (f *SyncFilter) Has(h uint64) bool { - h1, h2 := uint32(h>>32), uint32(h) - b := getblock(f.b, h2) - - for i := 1; i < f.k; i++ { - h1, h2 = doublehash(h1, h2, i) - if !getbitAtomic(b, h1) { - return false - } - } - return true -} - -// getbitAtomic reports whether bit (i modulo BlockBits) is set. -func getbitAtomic(b *block, i uint32) bool { - bit := uint32(1) << (i % wordSize) - x := atomic.LoadUint32(&(*b)[(i/wordSize)%blockWords]) - return x&bit != 0 -} - -// setbit sets bit (i modulo BlockBits) of b, atomically. -func setbitAtomic(b *block, i uint32) { - bit := uint32(1) << (i % wordSize) - p := &(*b)[(i/wordSize)%blockWords] - - for { - old := atomic.LoadUint32(p) - if old&bit != 0 { - // Checking here instead of checking the return value from - // the CAS is between 50% and 80% faster on the benchmark. 
- return - } - atomic.CompareAndSwapUint32(p, old, old|bit) - } -} diff --git a/vendor/github.com/greatroar/blobloom/test.sh b/vendor/github.com/greatroar/blobloom/test.sh deleted file mode 100644 index bf90f54..0000000 --- a/vendor/github.com/greatroar/blobloom/test.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/sh - -set -e -x - -golangci-lint run . examples/* - -go test - -if [ "$(go env GOARCH)" = amd64 ]; then - go test -tags nounsafe - GOARCH=386 go test -fi - -for e in examples/*; do - (cd $e && go build && rm $(basename $e)) -done diff --git a/vendor/github.com/nbd-wtf/go-nostr/kinds.go b/vendor/github.com/nbd-wtf/go-nostr/kinds.go index feb79b3..e31e112 100644 --- a/vendor/github.com/nbd-wtf/go-nostr/kinds.go +++ b/vendor/github.com/nbd-wtf/go-nostr/kinds.go @@ -26,7 +26,7 @@ const ( KindChess int = 64 KindMergeRequests int = 818 KindBid int = 1021 - KIndBidConfirmation int = 1022 + KindBidConfirmation int = 1022 KindOpenTimestamps int = 1040 KindGiftWrap int = 1059 KindFileMetadata int = 1063 diff --git a/vendor/github.com/nbd-wtf/go-nostr/nip77/nip77.go b/vendor/github.com/nbd-wtf/go-nostr/nip77/nip77.go index ebefbf6..8d43662 100644 --- a/vendor/github.com/nbd-wtf/go-nostr/nip77/nip77.go +++ b/vendor/github.com/nbd-wtf/go-nostr/nip77/nip77.go @@ -5,8 +5,6 @@ import ( "fmt" "sync" - "github.com/cespare/xxhash" - "github.com/greatroar/blobloom" "github.com/nbd-wtf/go-nostr" "github.com/nbd-wtf/go-nostr/nip77/negentropy" "github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector" @@ -88,10 +86,7 @@ func NegentropySync(ctx context.Context, store nostr.RelayStore, url string, fil go func(dir direction) { defer wg.Done() - seen := blobloom.NewOptimized(blobloom.Config{ - Capacity: 10000, - FPRate: 0.01, - }) + seen := make(map[string]struct{}) doSync := func(ids []string) { defer wg.Done() @@ -112,12 +107,11 @@ func NegentropySync(ctx context.Context, store nostr.RelayStore, url string, fil ids := pool.grab() for item := range dir.items { - h := xxhash.Sum64([]byte(item)) - if seen.Has(h) { + if _, ok := seen[item]; ok { continue } + seen[item] = struct{}{} - seen.Add(h) ids = append(ids, item) if len(ids) == 50 { wg.Add(1) diff --git a/vendor/github.com/nbd-wtf/go-nostr/relay.go b/vendor/github.com/nbd-wtf/go-nostr/relay.go index 5825689..0319f24 100644 --- a/vendor/github.com/nbd-wtf/go-nostr/relay.go +++ b/vendor/github.com/nbd-wtf/go-nostr/relay.go @@ -182,11 +182,13 @@ func (r *Relay) ConnectWithTLS(ctx context.Context, tlsConfig *tls.Config) error for { select { case <-ticker.C: - err := wsutil.WriteClientMessage(r.Connection.conn, ws.OpPing, nil) - if err != nil { - InfoLogger.Printf("{%s} error writing ping: %v; closing websocket", r.URL, err) - r.Close() // this should trigger a context cancelation - return + if r.Connection != nil { + err := wsutil.WriteClientMessage(r.Connection.conn, ws.OpPing, nil) + if err != nil { + InfoLogger.Printf("{%s} error writing ping: %v; closing websocket", r.URL, err) + r.Close() // this should trigger a context cancelation + return + } } case writeRequest := <-r.writeQueue: // all write requests will go through this to prevent races diff --git a/vendor/modules.txt b/vendor/modules.txt index da9f729..b0235eb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -17,7 +17,9 @@ github.com/btcsuite/btcd/btcec/v2/schnorr github.com/btcsuite/btcd/chaincfg/chainhash # github.com/cespare/xxhash v1.1.0 ## explicit -github.com/cespare/xxhash +# github.com/davecgh/go-spew v1.1.1 +## explicit +github.com/davecgh/go-spew/spew # 
github.com/decred/dcrd/crypto/blake256 v1.1.0 ## explicit; go 1.17 github.com/decred/dcrd/crypto/blake256 @@ -33,7 +35,7 @@ github.com/fasthttp/websocket ## explicit; go 1.23.1 github.com/fiatjaf/eventstore github.com/fiatjaf/eventstore/postgresql -# github.com/fiatjaf/khatru v0.12.0 +# github.com/fiatjaf/khatru v0.12.1 ## explicit; go 1.23.1 github.com/fiatjaf/khatru github.com/fiatjaf/khatru/policies @@ -53,7 +55,6 @@ github.com/gobwas/ws/wsflate github.com/gobwas/ws/wsutil # github.com/greatroar/blobloom v0.8.0 ## explicit; go 1.14 -github.com/greatroar/blobloom # github.com/jmoiron/sqlx v1.4.0 ## explicit; go 1.10 github.com/jmoiron/sqlx @@ -90,7 +91,7 @@ github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.20 ## explicit; go 1.15 github.com/mattn/go-isatty -# github.com/nbd-wtf/go-nostr v0.42.2 +# github.com/nbd-wtf/go-nostr v0.42.3 ## explicit; go 1.23.1 github.com/nbd-wtf/go-nostr github.com/nbd-wtf/go-nostr/nip11