replace lightningpub with alby
parent 14a8af3f16
commit 00641d2625

54 changed files with 4127 additions and 2234 deletions
219  alby/well-known.go  Normal file
@@ -0,0 +1,219 @@
package alby

import (
	"encoding/json"
	"fmt"
	"net"
	"net/http"
	"net/url"

	"git.devvul.com/asara/gologger"
	"git.devvul.com/asara/well-goknown/config"
	"github.com/davecgh/go-spew/spew"
	"github.com/gorilla/schema"
	"github.com/jmoiron/sqlx"
)

var (
	DB        *sqlx.DB
	qsDecoder = schema.NewDecoder()
)

type AlbyApp struct {
	Id          int32  `json:"id"`
	Name        string `json:"name"`
	NostrPubkey string `json:"nostrPubkey"`
}

type AlbyApps []AlbyApp

type lnurlp struct {
	Status         string `json:"status"`
	Tag            string `json:"tag"`
	CommentAllowed int32  `json:"commentAllowed"`
	Callback       string `json:"callback"`
	MinSendable    int64  `json:"minSendable"`
	MaxSendable    int64  `json:"maxSendable"`
	Metadata       string `json:"metadata"`
	AllowsNostr    bool   `json:"allowsNostr"`
	NostrPubkey    string `json:"nostrPubkey"`
}

type lnurlpError struct {
	Status string `json:"status"`
	Reason string `json:"reason"`
}

type NWCReqNostr struct {
	Id        string     `json:"id"`
	Pubkey    string     `json:"pubkey"`
	CreatedAt int64      `json:"created_at"`
	Kind      int32      `json:"kind"`
	Tags      [][]string `json:"tags"`
	Content   string     `json:"content"`
	Signature string     `json:"sig"`
}

type NWCReq struct {
	Nostr   string `json:"nostr"`
	Amount  string `json:"amount"`
	Comment string `json:"comment"`
}

type NWCSecret struct {
	Name         string
	Domain       string
	Wallet       string
	ClientPubkey string
	AppPubkey    string
	Relay        string
	Secret       string
}

func (s *NWCSecret) decodeSecret() {
	l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger()
	u, err := url.Parse(s.Wallet)
	if err != nil {
		l.Error().Msgf("unable to parse wallet string: %s", err.Error())
		return
	}
	s.AppPubkey = u.Host
	q, _ := url.ParseQuery(u.RawQuery)
	s.Relay = q.Get("relay")
	s.Secret = q.Get("secret")
}

func GetLnurlp(w http.ResponseWriter, r *http.Request) {
	l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger()
	albyAdmin := config.GetConfig().AlbyAdminAuth

	// setup response type
	w.Header().Set("Content-Type", "application/json")

	// normalize domain
	domain, _, err := net.SplitHostPort(r.Host)
	if err != nil {
		domain = r.Host
	}

	name := r.PathValue("name")

	// get all alby apps
	client := http.Client{}
	req, err := http.NewRequest("GET", "https://alby.devvul.com/api/apps", nil)
	if err != nil {
		l.Error().Msgf("unable to generate alby request for %s@%s: %s", name, domain, err.Error())
		lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "unknown error"}
		retError, _ := json.Marshal(lnurlpReturnError)
		w.WriteHeader(http.StatusNotFound)
		w.Write(retError)
		return
	}

	req.Header = http.Header{"Authorization": {fmt.Sprintf("Bearer %s", albyAdmin)}}
	resp, err := client.Do(req)
	defer resp.Body.Close()

	var albyApps AlbyApps
	err = json.NewDecoder(resp.Body).Decode(&albyApps)
	if err != nil {
		l.Error().Msgf("unable to unmarshal alby request for %s@%s: %s", name, domain, err.Error())
		lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "unknown error"}
		retError, _ := json.Marshal(lnurlpReturnError)
		w.WriteHeader(http.StatusNotFound)
		w.Write(retError)
		return
	}

	// check if user exists
	var npk string
	for _, element := range albyApps {
		if element.Name == name {
			npk = element.NostrPubkey
		}
	}

	if len(npk) == 0 {
		l.Debug().Msgf("user doesn't exist in alby %s@%s", name, domain)
		lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "user does not exist"}
		retError, _ := json.Marshal(lnurlpReturnError)
		w.WriteHeader(http.StatusNotFound)
		w.Write(retError)
		return
	}

	// get server pubkey
	var secret NWCSecret
	err = DB.QueryRow("SELECT name, domain, wallet, pubkey FROM lnwallets WHERE name=$1 AND domain=$2", name, domain).
		Scan(&secret.Name, &secret.Domain, &secret.Wallet, &secret.ClientPubkey)
	if err != nil {
		l.Debug().Msgf("user doesn't exist in alby %s@%s: %s", name, domain, err.Error())
		lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "user does not exist"}
		retError, _ := json.Marshal(lnurlpReturnError)
		w.WriteHeader(http.StatusNotFound)
		w.Write(retError)
		return
	}

	secret.decodeSecret()
	spew.Dump(secret)

	lnurlpReturn := &lnurlp{
		Status:         "OK",
		Tag:            "payRequest",
		CommentAllowed: 255,
		Callback:       fmt.Sprintf("https://%s/.well-known/lnurlp/%s/callback", domain, name),
		MinSendable:    1000,
		MaxSendable:    10000000,
		Metadata:       fmt.Sprintf("[[\"text/plain\", \"ln address payment to %s on the devvul server\"],[\"text/identifier\", \"%s@%s\"]]", name, name, domain),
		AllowsNostr:    true,
		NostrPubkey:    npk,
	}

	ret, err := json.Marshal(lnurlpReturn)
	if err != nil {
		l.Error().Msgf("unable to marshal json for %s@%s: %s", name, domain, err.Error())
		lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "User not found"}
		retError, _ := json.Marshal(lnurlpReturnError)
		w.WriteHeader(http.StatusNotFound)
		w.Write(retError)
		return
	}

	l.Debug().Msgf("returning pay request callback for %s@%s", name, domain)
	w.WriteHeader(http.StatusOK)
	w.Write(ret)
	return
}

func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
	l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger()
	//ctx := context.Background()
	var nwc NWCReq
	var nwcNostr NWCReqNostr

	// normalize domain
	domain, _, err := net.SplitHostPort(r.Host)
	if err != nil {
		domain = r.Host
	}
	name := r.PathValue("name")

	err = qsDecoder.Decode(&nwc, r.URL.Query())
	if err != nil {
		l.Error().Msgf("unable to marshal json for %s@%s: %s", name, domain, err.Error())
	}

	json.Unmarshal([]byte(nwc.Nostr), &nwcNostr)

	//
	// ev := nostr.Event{
	// 	PubKey:    nwc.Pubkey,
	// 	CreatedAt: nostr.Now(),
	// 	Kind:      nostr.KindNWCWalletRequest,
	// 	Tags:      nil,
	// 	Content:   "",
	// }
	//
	// ev.Sign()
}
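For reference, a minimal client-side sketch (not part of the commit) of how the new endpoint can be exercised once the handler above is registered in main.go. The host example.com and the user name alice are placeholders; the struct simply mirrors the JSON fields emitted by GetLnurlp, and a paying wallet would follow up by requesting the returned Callback URL, which GetLnurlpCallback begins to handle.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// payRequest mirrors the JSON fields written by alby.GetLnurlp above.
type payRequest struct {
	Status         string `json:"status"`
	Tag            string `json:"tag"`
	CommentAllowed int32  `json:"commentAllowed"`
	Callback       string `json:"callback"`
	MinSendable    int64  `json:"minSendable"`
	MaxSendable    int64  `json:"maxSendable"`
	Metadata       string `json:"metadata"`
	AllowsNostr    bool   `json:"allowsNostr"`
	NostrPubkey    string `json:"nostrPubkey"`
}

func main() {
	// Placeholder host and name: any user registered in alby and present in
	// the lnwallets table should resolve to a pay request document.
	resp, err := http.Get("https://example.com/.well-known/lnurlp/alice")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var pr payRequest
	if err := json.NewDecoder(resp.Body).Decode(&pr); err != nil {
		panic(err)
	}

	// A wallet would continue by requesting pr.Callback with an amount
	// (and optionally a NIP-57 zap request in the nostr parameter).
	fmt.Println(pr.Callback, pr.MinSendable, pr.MaxSendable, pr.NostrPubkey)
}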

@@ -17,6 +17,7 @@ type (
		RelayDescription string
		RelayIcon        string
		RelayContact     string
		AlbyAdminAuth    string
	}
)

@@ -33,6 +34,7 @@ func GetConfig() Config {
		RelayDescription: getEnv("RELAY_DESCRIPTION", ""),
		RelayIcon:        getEnv("RELAY_ICON", ""),
		RelayContact:     getEnv("RELAY_CONTACT", ""),
		AlbyAdminAuth:    getEnv("ALBY_ADMIN_AUTH", ""),
	}
}
8  go.mod
@@ -4,11 +4,13 @@ go 1.23.3

require (
	git.devvul.com/asara/gologger v0.9.0
	github.com/davecgh/go-spew v1.1.1
	github.com/fiatjaf/eventstore v0.14.0
	github.com/fiatjaf/khatru v0.12.0
	github.com/fiatjaf/khatru v0.12.1
	github.com/gorilla/schema v1.4.1
	github.com/jmoiron/sqlx v1.4.0
	github.com/lib/pq v1.10.9
	github.com/nbd-wtf/go-nostr v0.42.2
	github.com/nbd-wtf/go-nostr v0.42.3
)

require (
@@ -16,14 +18,12 @@ require (
	github.com/bep/debounce v1.2.1 // indirect
	github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect
	github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
	github.com/cespare/xxhash v1.1.0 // indirect
	github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
	github.com/fasthttp/websocket v1.5.10 // indirect
	github.com/gobwas/httphead v0.1.0 // indirect
	github.com/gobwas/pool v0.2.1 // indirect
	github.com/gobwas/ws v1.4.0 // indirect
	github.com/greatroar/blobloom v0.8.0 // indirect
	github.com/josharian/intern v1.0.0 // indirect
	github.com/klauspost/compress v1.17.11 // indirect
	github.com/mailru/easyjson v0.7.7 // indirect
25  go.sum
@@ -2,8 +2,6 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
git.devvul.com/asara/gologger v0.9.0 h1:gijJpkPjvzI5S/dmAXgYoKJbp5uuaETAOBYWo7bJg6U=
git.devvul.com/asara/gologger v0.9.0/go.mod h1:APr1DdVYByFfPUGHqHtRMhxphQbj92/vT/t0iM40H/0=
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY=
@@ -12,10 +10,7 @@ github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurT
github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
@@ -26,8 +21,8 @@ github.com/fasthttp/websocket v1.5.10 h1:bc7NIGyrg1L6sd5pRzCIbXpro54SZLEluZCu0rO
github.com/fasthttp/websocket v1.5.10/go.mod h1:BwHeuXGWzCW1/BIKUKD3+qfCl+cTdsHu/f243NcAI/Q=
github.com/fiatjaf/eventstore v0.14.0 h1:eAyugJGFRCrXYJLCc2nC/BIApmBbQN/Z4dxvNz1SIvI=
github.com/fiatjaf/eventstore v0.14.0/go.mod h1:XOl5B6WGBX1a0ww6s3WT94QVOmye/6zDTtyWHVtHQ5U=
github.com/fiatjaf/khatru v0.12.0 h1:pOWyahXl9UoyFTj/tX4Y3eM8nqGRHwMqM4F8ed7O3A0=
github.com/fiatjaf/khatru v0.12.0/go.mod h1:GfKKAR27sMxBmepv709QnL7C9lEmlhaj41LFm/ueATc=
github.com/fiatjaf/khatru v0.12.1 h1:J7GlQy/Be0nAXH9JdS9jVMv2JdwLQhSu7TK3ZbiFZh4=
github.com/fiatjaf/khatru v0.12.1/go.mod h1:GfKKAR27sMxBmepv709QnL7C9lEmlhaj41LFm/ueATc=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
@@ -37,8 +32,8 @@ github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6Wezm
github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs=
github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/greatroar/blobloom v0.8.0 h1:I9RlEkfqK9/6f1v9mFmDYegDQ/x0mISCpiNpAm23Pt4=
github.com/greatroar/blobloom v0.8.0/go.mod h1:mjMJ1hh1wjGVfr93QIHJ6FfDNVrA0IELv8OvMHJxHKs=
github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E=
github.com/gorilla/schema v1.4.1/go.mod h1:Dg5SSm5PV60mhF2NFaTV1xuYYj8tV8NOPRo4FggUMnM=
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -57,8 +52,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
github.com/nbd-wtf/go-nostr v0.42.2 h1:X8vpfLutvmyxqjsroKPHdIyPliNa6sYD8+CA0kDVySw=
github.com/nbd-wtf/go-nostr v0.42.2/go.mod h1:FBa4FBJO7NuANvkeKSlrf0BIyxGufmrUbuelr6Q4Ick=
github.com/nbd-wtf/go-nostr v0.42.3 h1:wimwmXLhF9ScrNTG4by3eSj2p7HUGkLUospX4bHjxQk=
github.com/nbd-wtf/go-nostr v0.42.3/go.mod h1:p29g9i1UiSBKdyXkNa6V8rFqE+wrIn4UY0Emabwdu6A=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -71,12 +66,6 @@ github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 h1:D0vL7YNisV2yqE55+q0lFuGse6U8lxlg7fYTctlT5Gc=
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
@@ -101,7 +90,5 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@@ -1,87 +0,0 @@
package lightningpub

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"

	"git.devvul.com/asara/gologger"
	"git.devvul.com/asara/well-goknown/config"
	"github.com/jmoiron/sqlx"
)

var (
	DB *sqlx.DB
)

type lnurlp struct {
	Tag         string `json:"tag"`
	Callback    string `json:"callback"`
	MaxSendable int64  `json:"maxSendable"`
	MinSendable int64  `json:"minSendable"`
	Metadata    string `json:"metadata"`
	AllowsNostr bool   `json:"allowsNostr"`
	NostrPubkey string `json:"nostrPubkey"`
}

func GetLnurlp(w http.ResponseWriter, r *http.Request) {
	l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger()

	// normalize domain
	domain, _, err := net.SplitHostPort(r.Host)
	if err != nil {
		domain = r.Host
	}

	name := r.PathValue("name")
	var lnwallet string
	err = DB.QueryRow("SELECT wallet FROM lnwallets WHERE name=$1 AND domain=$2", name, domain).Scan(&lnwallet)
	if err != nil {
		l.Debug().Msgf("user (%s@%s) doesn't exist: %s", name, domain, err.Error())
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	}

	//upstreamUrl := fmt.Sprintf("https://%s/api/guest/lnurl_pay/info?k1=%s", domain, lnwallet)
	upstreamUrl := fmt.Sprintf("https://%s/api/guest/lnurl_pay/info?k1=%s", domain, lnwallet)
	upstreamPayload, err := http.Get(upstreamUrl)
	if err != nil {
		l.Debug().Msgf("user (%s@%s) doesn't exist: %s", name, domain, err.Error())
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	}

	defer upstreamPayload.Body.Close()
	body, err := ioutil.ReadAll(upstreamPayload.Body)
	if err != nil {
		l.Debug().Msgf("user (%s@%s) doesn't exist: %s", name, domain, err.Error())
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	}

	lnurlpReturn := lnurlp{}
	err = json.Unmarshal(body, &lnurlpReturn)
	if err != nil {
		l.Debug().Msgf("user (%s@%s) doesn't exist: %s", name, domain, err.Error())
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	}

	m := fmt.Sprintf("[[\"text/plain\", \"ln address payment to a user on the devvul server\"],[\"text/identifier\", \"%s@%s\"]]", name, domain)
	lnurlpReturn.Metadata = m

	ret, err := json.Marshal(lnurlpReturn)
	if err != nil {
		l.Debug().Msgf("user (%s@%s) doesn't exist: %s", name, domain, err.Error())
		http.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	}

	l.Debug().Msgf("returning lnwallet for %s@%s", name, domain)
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	w.Write(ret)
	return
}
8  main.go
@@ -4,9 +4,9 @@ import (
	"net/http"

	"git.devvul.com/asara/gologger"
	"git.devvul.com/asara/well-goknown/alby"
	"git.devvul.com/asara/well-goknown/config"
	"git.devvul.com/asara/well-goknown/db"
	"git.devvul.com/asara/well-goknown/lightningpub"
	"git.devvul.com/asara/well-goknown/matrix"
	"git.devvul.com/asara/well-goknown/nostr"
	"github.com/fiatjaf/eventstore/postgresql"
@@ -22,7 +22,6 @@ func main() {
	db, _ := db.NewDB()
	defer db.Close()

	lightningpub.DB = db
	nostr.DB = db
	nostr.RelayDb = postgresql.PostgresBackend{DatabaseURL: config.GetConfig().DbUrl}
	if err := nostr.RelayDb.Init(); err != nil {
@@ -30,6 +29,8 @@ func main() {
	}
	relay := nostr.NewRelay(version)

	alby.DB = db

	// matrix endpoints
	l.Debug().Msg("enabling matrix well-known endpoints")
	http.HandleFunc("/.well-known/matrix/server", matrix.MatrixServer)
@@ -42,7 +43,8 @@ func main() {

	// lnurlp endpoint
	l.Debug().Msg("enabling lnurlp well-known endpoint")
	http.HandleFunc("/.well-known/lnurlp/{name}", lightningpub.GetLnurlp)
	http.HandleFunc("/.well-known/lnurlp/{name}", alby.GetLnurlp)
	http.HandleFunc("/.well-known/lnurlp/{name}/callback", alby.GetLnurlpCallback)

	// start server
	port := config.GetConfig().ListenAddr
3  migrations/000003_add_pubkey_to_wallets.up.sql  Normal file
@@ -0,0 +1,3 @@
BEGIN;
ALTER TABLE lnwallets ADD COLUMN pubkey TEXT;
COMMIT;
@@ -15,12 +15,16 @@ func RejectUnregisteredNpubs(ctx context.Context, event *nostr.Event) (reject bo

	// always allow the following kinds
	// 13: nip-59 seals
	// 9734: nip-57 zap request
	// 9735: nip-57 zap receipt
	// 21000: lightning.pub rpc
	// 13194: nip-47 info event
	// 22242: nip-42 client auth
	// 23194: nip-47 request
	// 23195: nip-47 response
	// 23196: nip-47 notification
	// 30078: nip-78 addressable events
	switch event.Kind {
	case 13, 9735, 21000, 22242, 30078:
	case 13, 9734, 9735, 13194, 22242, 23194, 23195, 23196, 30078:
		return false, ""
	}

@@ -8,3 +8,4 @@ export RELAY_DESCRIPTION="nostr relay running via git.devvul.com/asara/well-gokn
export RELAY_ICON=""
export RELAY_NAME="Nostr Relay"
export RELAY_PUBKEY=""
export ALBY_ADMIN_AUTH=""
22
vendor/github.com/cespare/xxhash/LICENSE.txt
generated
vendored
22
vendor/github.com/cespare/xxhash/LICENSE.txt
generated
vendored
|
@ -1,22 +0,0 @@
|
|||
Copyright (c) 2016 Caleb Spare
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
50
vendor/github.com/cespare/xxhash/README.md
generated
vendored
50
vendor/github.com/cespare/xxhash/README.md
generated
vendored
|
@ -1,50 +0,0 @@
|
|||
# xxhash
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
|
||||
|
||||
xxhash is a Go implementation of the 64-bit
|
||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
||||
high-quality hashing algorithm that is much faster than anything in the Go
|
||||
standard library.
|
||||
|
||||
The API is very small, taking its cue from the other hashing packages in the
|
||||
standard library:
|
||||
|
||||
$ go doc github.com/cespare/xxhash !
|
||||
package xxhash // import "github.com/cespare/xxhash"
|
||||
|
||||
Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||
at http://cyan4973.github.io/xxHash/.
|
||||
|
||||
func New() hash.Hash64
|
||||
func Sum64(b []byte) uint64
|
||||
func Sum64String(s string) uint64
|
||||
|
||||
This implementation provides a fast pure-Go implementation and an even faster
|
||||
assembly implementation for amd64.
|
||||
|
||||
## Benchmarks
|
||||
|
||||
Here are some quick benchmarks comparing the pure-Go and assembly
|
||||
implementations of Sum64 against another popular Go XXH64 implementation,
|
||||
[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash):
|
||||
|
||||
| input size | OneOfOne | cespare (purego) | cespare |
|
||||
| --- | --- | --- | --- |
|
||||
| 5 B | 416 MB/s | 720 MB/s | 872 MB/s |
|
||||
| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s |
|
||||
| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s |
|
||||
| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s |
|
||||
|
||||
These numbers were generated with:
|
||||
|
||||
```
|
||||
$ go test -benchtime 10s -bench '/OneOfOne,'
|
||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,'
|
||||
$ go test -benchtime 10s -bench '/xxhash,'
|
||||
```
|
||||
|
||||
## Projects using this package
|
||||
|
||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
14
vendor/github.com/cespare/xxhash/rotate.go
generated
vendored
14
vendor/github.com/cespare/xxhash/rotate.go
generated
vendored
|
@ -1,14 +0,0 @@
|
|||
// +build !go1.9
|
||||
|
||||
package xxhash
|
||||
|
||||
// TODO(caleb): After Go 1.10 comes out, remove this fallback code.
|
||||
|
||||
func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) }
|
||||
func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) }
|
||||
func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
|
||||
func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
|
||||
func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
|
||||
func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
|
||||
func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
|
||||
func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
|
14
vendor/github.com/cespare/xxhash/rotate19.go
generated
vendored
14
vendor/github.com/cespare/xxhash/rotate19.go
generated
vendored
|
@ -1,14 +0,0 @@
|
|||
// +build go1.9
|
||||
|
||||
package xxhash
|
||||
|
||||
import "math/bits"
|
||||
|
||||
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
|
||||
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
|
||||
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
|
||||
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
|
||||
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
|
||||
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
|
||||
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
|
||||
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
168
vendor/github.com/cespare/xxhash/xxhash.go
generated
vendored
168
vendor/github.com/cespare/xxhash/xxhash.go
generated
vendored
|
@ -1,168 +0,0 @@
|
|||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||
// at http://cyan4973.github.io/xxHash/.
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
)
|
||||
|
||||
const (
|
||||
prime1 uint64 = 11400714785074694791
|
||||
prime2 uint64 = 14029467366897019727
|
||||
prime3 uint64 = 1609587929392839161
|
||||
prime4 uint64 = 9650029242287828579
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||
// possible in the Go code is worth a small (but measurable) performance boost
|
||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||
// convenience in the Go code in a few places where we need to intentionally
|
||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||
// result overflows a uint64).
|
||||
var (
|
||||
prime1v = prime1
|
||||
prime2v = prime2
|
||||
prime3v = prime3
|
||||
prime4v = prime4
|
||||
prime5v = prime5
|
||||
)
|
||||
|
||||
type xxh struct {
|
||||
v1 uint64
|
||||
v2 uint64
|
||||
v3 uint64
|
||||
v4 uint64
|
||||
total int
|
||||
mem [32]byte
|
||||
n int // how much of mem is used
|
||||
}
|
||||
|
||||
// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm.
|
||||
func New() hash.Hash64 {
|
||||
var x xxh
|
||||
x.Reset()
|
||||
return &x
|
||||
}
|
||||
|
||||
func (x *xxh) Reset() {
|
||||
x.n = 0
|
||||
x.total = 0
|
||||
x.v1 = prime1v + prime2
|
||||
x.v2 = prime2
|
||||
x.v3 = 0
|
||||
x.v4 = -prime1v
|
||||
}
|
||||
|
||||
func (x *xxh) Size() int { return 8 }
|
||||
func (x *xxh) BlockSize() int { return 32 }
|
||||
|
||||
// Write adds more data to x. It always returns len(b), nil.
|
||||
func (x *xxh) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
x.total += len(b)
|
||||
|
||||
if x.n+len(b) < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(x.mem[x.n:], b)
|
||||
x.n += len(b)
|
||||
return
|
||||
}
|
||||
|
||||
if x.n > 0 {
|
||||
// Finish off the partial block.
|
||||
copy(x.mem[x.n:], b)
|
||||
x.v1 = round(x.v1, u64(x.mem[0:8]))
|
||||
x.v2 = round(x.v2, u64(x.mem[8:16]))
|
||||
x.v3 = round(x.v3, u64(x.mem[16:24]))
|
||||
x.v4 = round(x.v4, u64(x.mem[24:32]))
|
||||
b = b[32-x.n:]
|
||||
x.n = 0
|
||||
}
|
||||
|
||||
if len(b) >= 32 {
|
||||
// One or more full blocks left.
|
||||
b = writeBlocks(x, b)
|
||||
}
|
||||
|
||||
// Store any remaining partial block.
|
||||
copy(x.mem[:], b)
|
||||
x.n = len(b)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (x *xxh) Sum(b []byte) []byte {
|
||||
s := x.Sum64()
|
||||
return append(
|
||||
b,
|
||||
byte(s>>56),
|
||||
byte(s>>48),
|
||||
byte(s>>40),
|
||||
byte(s>>32),
|
||||
byte(s>>24),
|
||||
byte(s>>16),
|
||||
byte(s>>8),
|
||||
byte(s),
|
||||
)
|
||||
}
|
||||
|
||||
func (x *xxh) Sum64() uint64 {
|
||||
var h uint64
|
||||
|
||||
if x.total >= 32 {
|
||||
v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = x.v3 + prime5
|
||||
}
|
||||
|
||||
h += uint64(x.total)
|
||||
|
||||
i, end := 0, x.n
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(x.mem[i:i+8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(x.mem[i:i+4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
}
|
||||
for i < end {
|
||||
h ^= uint64(x.mem[i]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
i++
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
|
||||
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
|
||||
|
||||
func round(acc, input uint64) uint64 {
|
||||
acc += input * prime2
|
||||
acc = rol31(acc)
|
||||
acc *= prime1
|
||||
return acc
|
||||
}
|
||||
|
||||
func mergeRound(acc, val uint64) uint64 {
|
||||
val = round(0, val)
|
||||
acc ^= val
|
||||
acc = acc*prime1 + prime4
|
||||
return acc
|
||||
}
|
12
vendor/github.com/cespare/xxhash/xxhash_amd64.go
generated
vendored
12
vendor/github.com/cespare/xxhash/xxhash_amd64.go
generated
vendored
|
@ -1,12 +0,0 @@
|
|||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
//
|
||||
//go:noescape
|
||||
func Sum64(b []byte) uint64
|
||||
|
||||
func writeBlocks(x *xxh, b []byte) []byte
|
233
vendor/github.com/cespare/xxhash/xxhash_amd64.s
generated
vendored
233
vendor/github.com/cespare/xxhash/xxhash_amd64.s
generated
vendored
|
@ -1,233 +0,0 @@
|
|||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// CX pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
// R9 v2
|
||||
// R10 v3
|
||||
// R11 v4
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// R15 prime4v
|
||||
|
||||
// round reads from and advances the buffer pointer in CX.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (CX), R12 \
|
||||
ADDQ $8, CX \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ R15, acc
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), R15
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), CX
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ DX, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ R13, R8
|
||||
ADDQ R14, R8
|
||||
MOVQ R14, R9
|
||||
XORQ R10, R10
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
|
||||
// Loop until CX > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE blockLoop
|
||||
|
||||
MOVQ R8, AX
|
||||
ROLQ $1, AX
|
||||
MOVQ R9, R12
|
||||
ROLQ $7, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R10, R12
|
||||
ROLQ $12, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R11, R12
|
||||
ROLQ $18, R12
|
||||
ADDQ R12, AX
|
||||
|
||||
mergeRound(AX, R8)
|
||||
mergeRound(AX, R9)
|
||||
mergeRound(AX, R10)
|
||||
mergeRound(AX, R11)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·prime5v(SB), AX
|
||||
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
|
||||
CMPQ CX, BX
|
||||
JG fourByte
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (CX), R8
|
||||
ADDQ $8, CX
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ R15, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE wordLoop
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
JG singles
|
||||
|
||||
MOVL (CX), R8
|
||||
ADDQ $4, CX
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
ROLQ $23, AX
|
||||
IMULQ R14, AX
|
||||
ADDQ ·prime3v(SB), AX
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (CX), R12
|
||||
ADDQ $1, CX
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
JL singlesLoop
|
||||
|
||||
finalize:
|
||||
MOVQ AX, R12
|
||||
SHRQ $33, R12
|
||||
XORQ R12, AX
|
||||
IMULQ R14, AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $29, R12
|
||||
XORQ R12, AX
|
||||
IMULQ ·prime3v(SB), AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $32, R12
|
||||
XORQ R12, AX
|
||||
|
||||
MOVQ AX, ret+24(FP)
|
||||
RET
|
||||
|
||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||
// the x pointer.
|
||||
|
||||
// func writeBlocks(x *xxh, b []byte) []byte
|
||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-56
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), CX
|
||||
MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
|
||||
// Load vN from x.
|
||||
MOVQ x+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
MOVQ 24(AX), R11 // v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE blockLoop
|
||||
|
||||
// Copy vN back to x.
|
||||
MOVQ R8, 0(AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
|
||||
// Construct return slice.
|
||||
// NOTE: It's important that we don't construct a slice that has a base
|
||||
// pointer off the end of the original slice, as in Go 1.7+ this will
|
||||
// cause runtime crashes. (See discussion in, for example,
|
||||
// https://github.com/golang/go/issues/16772.)
|
||||
// Therefore, we calculate the length/cap first, and if they're zero, we
|
||||
// keep the old base. This is what the compiler does as well if you
|
||||
// write code like
|
||||
// b = b[len(b):]
|
||||
|
||||
// New length is 32 - (CX - BX) -> BX+32 - CX.
|
||||
ADDQ $32, BX
|
||||
SUBQ CX, BX
|
||||
JZ afterSetBase
|
||||
|
||||
MOVQ CX, ret_base+32(FP)
|
||||
|
||||
afterSetBase:
|
||||
MOVQ BX, ret_len+40(FP)
|
||||
MOVQ BX, ret_cap+48(FP) // set cap == len
|
||||
|
||||
RET
|
75
vendor/github.com/cespare/xxhash/xxhash_other.go
generated
vendored
75
vendor/github.com/cespare/xxhash/xxhash_other.go
generated
vendored
|
@ -1,75 +0,0 @@
|
|||
// +build !amd64 appengine !gc purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
func Sum64(b []byte) uint64 {
|
||||
// A simpler version would be
|
||||
// x := New()
|
||||
// x.Write(b)
|
||||
// return x.Sum64()
|
||||
// but this is faster, particularly for small inputs.
|
||||
|
||||
n := len(b)
|
||||
var h uint64
|
||||
|
||||
if n >= 32 {
|
||||
v1 := prime1v + prime2
|
||||
v2 := prime2
|
||||
v3 := uint64(0)
|
||||
v4 := -prime1v
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = prime5
|
||||
}
|
||||
|
||||
h += uint64(n)
|
||||
|
||||
i, end := 0, len(b)
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
}
|
||||
for ; i < end; i++ {
|
||||
h ^= uint64(b[i]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func writeBlocks(x *xxh, b []byte) []byte {
|
||||
v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4
|
||||
return b
|
||||
}
|
10
vendor/github.com/cespare/xxhash/xxhash_safe.go
generated
vendored
10
vendor/github.com/cespare/xxhash/xxhash_safe.go
generated
vendored
|
@ -1,10 +0,0 @@
|
|||
// +build appengine
|
||||
|
||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
func Sum64String(s string) uint64 {
|
||||
return Sum64([]byte(s))
|
||||
}
|
30
vendor/github.com/cespare/xxhash/xxhash_unsafe.go
generated
vendored
30
vendor/github.com/cespare/xxhash/xxhash_unsafe.go
generated
vendored
|
@ -1,30 +0,0 @@
|
|||
// +build !appengine
|
||||
|
||||
// This file encapsulates usage of unsafe.
|
||||
// xxhash_safe.go contains the safe implementations.
|
||||
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
||||
//
|
||||
// TODO(caleb): Consider removing this if an optimization is ever added to make
|
||||
// it unnecessary: https://golang.org/issue/2205.
|
||||
//
|
||||
// TODO(caleb): We still have a function call; we could instead write Go/asm
|
||||
// copies of Sum64 for strings to squeeze out a bit more speed.
|
||||
func Sum64String(s string) uint64 {
|
||||
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
|
||||
// for some discussion about this unsafe conversion.
|
||||
var b []byte
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
bh.Len = len(s)
|
||||
bh.Cap = len(s)
|
||||
return Sum64(b)
|
||||
}
|
15
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
Normal file
15
vendor/github.com/davecgh/go-spew/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,15 @@
|
|||
ISC License
|
||||
|
||||
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
|
||||
|
||||
Permission to use, copy, modify, and/or distribute this software for any
|
||||
purpose with or without fee is hereby granted, provided that the above
|
||||
copyright notice and this permission notice appear in all copies.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
145
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
Normal file
145
vendor/github.com/davecgh/go-spew/spew/bypass.go
generated
vendored
Normal file
|
@ -0,0 +1,145 @@
|
|||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is not running on Google App Engine, compiled by GopherJS, and
|
||||
// "-tags safe" is not added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// Go versions prior to 1.4 are disabled because they use a different layout
|
||||
// for interfaces which make the implementation of unsafeReflectValue more complex.
|
||||
// +build !js,!appengine,!safe,!disableunsafe,go1.4
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = false
|
||||
|
||||
// ptrSize is the size of a pointer on the current arch.
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
type flag uintptr
|
||||
|
||||
var (
|
||||
// flagRO indicates whether the value field of a reflect.Value
|
||||
// is read-only.
|
||||
flagRO flag
|
||||
|
||||
// flagAddr indicates whether the address of the reflect.Value's
|
||||
// value may be taken.
|
||||
flagAddr flag
|
||||
)
|
||||
|
||||
// flagKindMask holds the bits that make up the kind
|
||||
// part of the flags field. In all the supported versions,
|
||||
// it is in the lower 5 bits.
|
||||
const flagKindMask = flag(0x1f)
|
||||
|
||||
// Different versions of Go have used different
|
||||
// bit layouts for the flags type. This table
|
||||
// records the known combinations.
|
||||
var okFlags = []struct {
|
||||
ro, addr flag
|
||||
}{{
|
||||
// From Go 1.4 to 1.5
|
||||
ro: 1 << 5,
|
||||
addr: 1 << 7,
|
||||
}, {
|
||||
// Up to Go tip.
|
||||
ro: 1<<5 | 1<<6,
|
||||
addr: 1 << 8,
|
||||
}}
|
||||
|
||||
var flagValOffset = func() uintptr {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
return field.Offset
|
||||
}()
|
||||
|
||||
// flagField returns a pointer to the flag field of a reflect.Value.
|
||||
func flagField(v *reflect.Value) *flag {
|
||||
return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
|
||||
// the typical safety restrictions preventing access to unaddressable and
|
||||
// unexported data. It works by digging the raw pointer to the underlying
|
||||
// value out of the protected value and generating a new unprotected (unsafe)
|
||||
// reflect.Value to it.
|
||||
//
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
|
||||
return v
|
||||
}
|
||||
flagFieldPtr := flagField(&v)
|
||||
*flagFieldPtr &^= flagRO
|
||||
*flagFieldPtr |= flagAddr
|
||||
return v
|
||||
}
|
||||
|
||||
// Sanity checks against future reflect package changes
|
||||
// to the type or semantics of the Value.flag field.
|
||||
func init() {
|
||||
field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
|
||||
if !ok {
|
||||
panic("reflect.Value has no flag field")
|
||||
}
|
||||
if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
|
||||
panic("reflect.Value flag field has changed kind")
|
||||
}
|
||||
type t0 int
|
||||
var t struct {
|
||||
A t0
|
||||
// t0 will have flagEmbedRO set.
|
||||
t0
|
||||
// a will have flagStickyRO set
|
||||
a t0
|
||||
}
|
||||
vA := reflect.ValueOf(t).FieldByName("A")
|
||||
va := reflect.ValueOf(t).FieldByName("a")
|
||||
vt0 := reflect.ValueOf(t).FieldByName("t0")
|
||||
|
||||
// Infer flagRO from the difference between the flags
|
||||
// for the (otherwise identical) fields in t.
|
||||
flagPublic := *flagField(&vA)
|
||||
flagWithRO := *flagField(&va) | *flagField(&vt0)
|
||||
flagRO = flagPublic ^ flagWithRO
|
||||
|
||||
// Infer flagAddr from the difference between a value
|
||||
// taken from a pointer and not.
|
||||
vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
|
||||
flagNoPtr := *flagField(&vA)
|
||||
flagPtr := *flagField(&vPtrA)
|
||||
flagAddr = flagNoPtr ^ flagPtr
|
||||
|
||||
// Check that the inferred flags tally with one of the known versions.
|
||||
for _, f := range okFlags {
|
||||
if flagRO == f.ro && flagAddr == f.addr {
|
||||
return
|
||||
}
|
||||
}
|
||||
panic("reflect.Value read-only flag has changed semantics")
|
||||
}
|
38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
Normal file
38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
generated
vendored
Normal file
|
@ -0,0 +1,38 @@
|
|||
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
|
||||
//
|
||||
// Permission to use, copy, modify, and distribute this software for any
|
||||
// purpose with or without fee is hereby granted, provided that the above
|
||||
// copyright notice and this permission notice appear in all copies.
|
||||
//
|
||||
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
|
||||
// NOTE: Due to the following build constraints, this file will only be compiled
|
||||
// when the code is running on Google App Engine, compiled by GopherJS, or
|
||||
// "-tags safe" is added to the go build command line. The "disableunsafe"
|
||||
// tag is deprecated and thus should not be used.
|
||||
// +build js appengine safe disableunsafe !go1.4
|
||||
|
||||
package spew
|
||||
|
||||
import "reflect"
|
||||
|
||||
const (
|
||||
// UnsafeDisabled is a build-time constant which specifies whether or
|
||||
// not access to the unsafe package is available.
|
||||
UnsafeDisabled = true
|
||||
)
|
||||
|
||||
// unsafeReflectValue typically converts the passed reflect.Value into a one
|
||||
// that bypasses the typical safety restrictions preventing access to
|
||||
// unaddressable and unexported data. However, doing this relies on access to
|
||||
// the unsafe package. This is a stub version which simply returns the passed
|
||||
// reflect.Value when the unsafe package is not available.
|
||||
func unsafeReflectValue(v reflect.Value) reflect.Value {
|
||||
return v
|
||||
}
|
341
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
Normal file
341
vendor/github.com/davecgh/go-spew/spew/common.go
generated
vendored
Normal file
|
@ -0,0 +1,341 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||
// the technique used in the fmt package.
|
||||
var (
|
||||
panicBytes = []byte("(PANIC=")
|
||||
plusBytes = []byte("+")
|
||||
iBytes = []byte("i")
|
||||
trueBytes = []byte("true")
|
||||
falseBytes = []byte("false")
|
||||
interfaceBytes = []byte("(interface {})")
|
||||
commaNewlineBytes = []byte(",\n")
|
||||
newlineBytes = []byte("\n")
|
||||
openBraceBytes = []byte("{")
|
||||
openBraceNewlineBytes = []byte("{\n")
|
||||
closeBraceBytes = []byte("}")
|
||||
asteriskBytes = []byte("*")
|
||||
colonBytes = []byte(":")
|
||||
colonSpaceBytes = []byte(": ")
|
||||
openParenBytes = []byte("(")
|
||||
closeParenBytes = []byte(")")
|
||||
spaceBytes = []byte(" ")
|
||||
pointerChainBytes = []byte("->")
|
||||
nilAngleBytes = []byte("<nil>")
|
||||
maxNewlineBytes = []byte("<max depth reached>\n")
|
||||
maxShortBytes = []byte("<max>")
|
||||
circularBytes = []byte("<already shown>")
|
||||
circularShortBytes = []byte("<shown>")
|
||||
invalidAngleBytes = []byte("<invalid>")
|
||||
openBracketBytes = []byte("[")
|
||||
closeBracketBytes = []byte("]")
|
||||
percentBytes = []byte("%")
|
||||
precisionBytes = []byte(".")
|
||||
openAngleBytes = []byte("<")
|
||||
closeAngleBytes = []byte(">")
|
||||
openMapBytes = []byte("map[")
|
||||
closeMapBytes = []byte("]")
|
||||
lenEqualsBytes = []byte("len=")
|
||||
capEqualsBytes = []byte("cap=")
|
||||
)
|
||||
|
||||
// hexDigits is used to map a decimal value to a hex digit.
|
||||
var hexDigits = "0123456789abcdef"
|
||||
|
||||
// catchPanic handles any panics that might occur during the handleMethods
|
||||
// calls.
|
||||
func catchPanic(w io.Writer, v reflect.Value) {
|
||||
if err := recover(); err != nil {
|
||||
w.Write(panicBytes)
|
||||
fmt.Fprintf(w, "%v", err)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// handleMethods attempts to call the Error and String methods on the underlying
|
||||
// type the passed reflect.Value represents and outputes the result to Writer w.
|
||||
//
|
||||
// It handles panics in any called methods by catching and displaying the error
|
||||
// as the formatted value.
|
||||
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
||||
// We need an interface to check if the type implements the error or
|
||||
// Stringer interface. However, the reflect package won't give us an
|
||||
// interface on certain things like unexported struct fields in order
|
||||
// to enforce visibility rules. We use unsafe, when it's available,
|
||||
// to bypass these restrictions since this package does not mutate the
|
||||
// values.
|
||||
if !v.CanInterface() {
|
||||
if UnsafeDisabled {
|
||||
return false
|
||||
}
|
||||
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
|
||||
// Choose whether or not to do error and Stringer interface lookups against
|
||||
// the base type or a pointer to the base type depending on settings.
|
||||
// Technically calling one of these methods with a pointer receiver can
|
||||
// mutate the value, however, types which choose to satisify an error or
|
||||
// Stringer interface with a pointer receiver should not be mutating their
|
||||
// state inside these interface methods.
|
||||
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
|
||||
// Is it an error or Stringer?
|
||||
switch iface := v.Interface().(type) {
|
||||
case error:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.Error()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
|
||||
w.Write([]byte(iface.Error()))
|
||||
return true
|
||||
|
||||
case fmt.Stringer:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.String()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
w.Write([]byte(iface.String()))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// printBool outputs a boolean value as true or false to Writer w.
|
||||
func printBool(w io.Writer, val bool) {
|
||||
if val {
|
||||
w.Write(trueBytes)
|
||||
} else {
|
||||
w.Write(falseBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// printInt outputs a signed integer value to Writer w.
|
||||
func printInt(w io.Writer, val int64, base int) {
|
||||
w.Write([]byte(strconv.FormatInt(val, base)))
|
||||
}
|
||||
|
||||
// printUint outputs an unsigned integer value to Writer w.
|
||||
func printUint(w io.Writer, val uint64, base int) {
|
||||
w.Write([]byte(strconv.FormatUint(val, base)))
|
||||
}
|
||||
|
||||
// printFloat outputs a floating point value using the specified precision,
|
||||
// which is expected to be 32 or 64bit, to Writer w.
|
||||
func printFloat(w io.Writer, val float64, precision int) {
|
||||
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
||||
}
|
||||
|
||||
// printComplex outputs a complex value using the specified float precision
|
||||
// for the real and imaginary parts to Writer w.
|
||||
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||
r := real(c)
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
||||
i := imag(c)
|
||||
if i >= 0 {
|
||||
w.Write(plusBytes)
|
||||
}
|
||||
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
||||
w.Write(iBytes)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
||||
func printHexPtr(w io.Writer, p uintptr) {
|
||||
// Null pointer.
|
||||
num := uint64(p)
|
||||
if num == 0 {
|
||||
w.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
||||
buf := make([]byte, 18)
|
||||
|
||||
// It's simpler to construct the hex string right to left.
|
||||
base := uint64(16)
|
||||
i := len(buf) - 1
|
||||
for num >= base {
|
||||
buf[i] = hexDigits[num%base]
|
||||
num /= base
|
||||
i--
|
||||
}
|
||||
buf[i] = hexDigits[num]
|
||||
|
||||
// Add '0x' prefix.
|
||||
i--
|
||||
buf[i] = 'x'
|
||||
i--
|
||||
buf[i] = '0'
|
||||
|
||||
// Strip unused leading bytes.
|
||||
buf = buf[i:]
|
||||
w.Write(buf)
|
||||
}
|
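
For orientation only: the right-to-left digit construction in printHexPtr is equivalent to the standalone sketch below (hypothetical helper; the real function writes to an io.Writer and reuses the package's nilAngleBytes):

package main

import (
	"fmt"
	"strconv"
)

// hexPtr mirrors printHexPtr's output: "<nil>" for a zero pointer,
// otherwise the value in lowercase hex with a "0x" prefix.
func hexPtr(p uintptr) string {
	if p == 0 {
		return "<nil>"
	}
	return "0x" + strconv.FormatUint(uint64(p), 16)
}

func main() {
	fmt.Println(hexPtr(0))    // <nil>
	fmt.Println(hexPtr(0x1f)) // 0x1f
}
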
||||
|
||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
||||
// elements to be sorted.
|
||||
type valuesSorter struct {
|
||||
values []reflect.Value
|
||||
strings []string // either nil or same len as values
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
||||
// surrogate keys on which the data should be sorted. It uses flags in
|
||||
// ConfigState to decide if and how to populate those surrogate keys.
|
||||
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
||||
vs := &valuesSorter{values: values, cs: cs}
|
||||
if canSortSimply(vs.values[0].Kind()) {
|
||||
return vs
|
||||
}
|
||||
if !cs.DisableMethods {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
b := bytes.Buffer{}
|
||||
if !handleMethods(cs, &b, vs.values[i]) {
|
||||
vs.strings = nil
|
||||
break
|
||||
}
|
||||
vs.strings[i] = b.String()
|
||||
}
|
||||
}
|
||||
if vs.strings == nil && cs.SpewKeys {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
||||
// directly, or whether it should be considered for sorting by surrogate keys
|
||||
// (if the ConfigState allows it).
|
||||
func canSortSimply(kind reflect.Kind) bool {
|
||||
// This switch parallels valueSortLess, except for the default case.
|
||||
switch kind {
|
||||
case reflect.Bool:
|
||||
return true
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return true
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return true
|
||||
case reflect.String:
|
||||
return true
|
||||
case reflect.Uintptr:
|
||||
return true
|
||||
case reflect.Array:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Len returns the number of values in the slice. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Len() int {
|
||||
return len(s.values)
|
||||
}
|
||||
|
||||
// Swap swaps the values at the passed indices. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Swap(i, j int) {
|
||||
s.values[i], s.values[j] = s.values[j], s.values[i]
|
||||
if s.strings != nil {
|
||||
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
||||
}
|
||||
}
|
||||
|
||||
// valueSortLess returns whether the first value should sort before the second
|
||||
// value. It is used by valueSorter.Less as part of the sort.Interface
|
||||
// implementation.
|
||||
func valueSortLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.String:
|
||||
return a.String() < b.String()
|
||||
case reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Array:
|
||||
// Compare the contents of both arrays.
|
||||
l := a.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
av := a.Index(i)
|
||||
bv := b.Index(i)
|
||||
if av.Interface() == bv.Interface() {
|
||||
continue
|
||||
}
|
||||
return valueSortLess(av, bv)
|
||||
}
|
||||
}
|
||||
return a.String() < b.String()
|
||||
}
|
||||
|
||||
// Less returns whether the value at index i should sort before the
|
||||
// value at index j. It is part of the sort.Interface implementation.
|
||||
func (s *valuesSorter) Less(i, j int) bool {
|
||||
if s.strings == nil {
|
||||
return valueSortLess(s.values[i], s.values[j])
|
||||
}
|
||||
return s.strings[i] < s.strings[j]
|
||||
}
|
||||
|
||||
// sortValues is a sort function that handles both native types and any type that
|
||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||
// their Value.String() value to ensure display stability.
|
||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Sort(newValuesSorter(values, cs))
|
||||
}
|
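
To make the sorting machinery above concrete, here is a small hedged example of what SortKeys buys a caller; the map contents are made up and only the documented spew API is used:

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// With SortKeys enabled, map keys are ordered by valueSortLess for
	// primitive kinds, or by the surrogate strings newValuesSorter builds,
	// so repeated dumps of the same map are byte-for-byte identical.
	cfg := spew.ConfigState{Indent: " ", SortKeys: true}
	m := map[string]int{"b": 2, "c": 3, "a": 1}
	cfg.Dump(m) // keys print as "a", "b", "c" regardless of iteration order
}
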
306
vendor/github.com/davecgh/go-spew/spew/config.go
generated
vendored
Normal file
|
@ -0,0 +1,306 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ConfigState houses the configuration options used by spew to format and
|
||||
// display values. There is a global instance, Config, that is used to control
|
||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
||||
// provides methods equivalent to the top-level functions.
|
||||
//
|
||||
// The zero value for ConfigState provides no indentation. You would typically
|
||||
// want to set it to a space or a tab.
|
||||
//
|
||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
||||
// with default settings. See the documentation of NewDefaultConfig for default
|
||||
// values.
|
||||
type ConfigState struct {
|
||||
// Indent specifies the string to use for each indentation level. The
|
||||
// global config instance that all top-level functions use sets this to a
|
||||
// single space by default. If you would like more indentation, you might
|
||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
||||
Indent string
|
||||
|
||||
// MaxDepth controls the maximum number of levels to descend into nested
|
||||
// data structures. The default, 0, means there is no limit.
|
||||
//
|
||||
// NOTE: Circular data structures are properly detected, so it is not
|
||||
// necessary to set this value unless you specifically want to limit deeply
|
||||
// nested data structures.
|
||||
MaxDepth int
|
||||
|
||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
||||
// invoked for types that implement them.
|
||||
DisableMethods bool
|
||||
|
||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
||||
// error and Stringer interfaces on types which only accept a pointer
|
||||
// receiver when the current type is not a pointer.
|
||||
//
|
||||
// NOTE: This might be an unsafe action since calling one of these methods
|
||||
// with a pointer receiver could technically mutate the value, however,
|
||||
// in practice, types which choose to satisfy an error or Stringer
|
||||
// interface with a pointer receiver should not be mutating their state
|
||||
// inside these interface methods. As a result, this option relies on
|
||||
// access to the unsafe package, so it will not have any effect when
|
||||
// running in environments without access to the unsafe package such as
|
||||
// Google App Engine or with the "safe" build tag specified.
|
||||
DisablePointerMethods bool
|
||||
|
||||
// DisablePointerAddresses specifies whether to disable the printing of
|
||||
// pointer addresses. This is useful when diffing data structures in tests.
|
||||
DisablePointerAddresses bool
|
||||
|
||||
// DisableCapacities specifies whether to disable the printing of capacities
|
||||
// for arrays, slices, maps and channels. This is useful when diffing
|
||||
// data structures in tests.
|
||||
DisableCapacities bool
|
||||
|
||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||
// a custom error or Stringer interface is invoked. The default, false,
|
||||
// means it will print the results of invoking the custom error or Stringer
|
||||
// interface and return immediately instead of continuing to recurse into
|
||||
// the internals of the data type.
|
||||
//
|
||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
||||
// via the DisableMethods or DisablePointerMethods options.
|
||||
ContinueOnMethod bool
|
||||
|
||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
||||
// this to have a more deterministic, diffable output. Note that only
|
||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
||||
// that support the error or Stringer interfaces (if methods are
|
||||
// enabled) are supported, with other types sorted according to the
|
||||
// reflect.Value.String() output which guarantees display stability.
|
||||
SortKeys bool
|
||||
|
||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
||||
// be spewed to strings and sorted by those strings. This is only
|
||||
// considered if SortKeys is true.
|
||||
SpewKeys bool
|
||||
}
|
||||
|
||||
// Config is the active configuration of the top-level functions.
|
||||
// The configuration can be changed by modifying the contents of spew.Config.
|
||||
var Config = ConfigState{Indent: " "}
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the formatted string as a value that satisfies error. See NewFormatter
|
||||
// for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// passed with a Formatter interface returned by c.NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||
// combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
c.Printf, c.Println, or c.Sprintf.
|
||||
*/
|
||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(c, v)
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(c, w, a...)
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by modifying the public members
|
||||
of c. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func (c *ConfigState) Dump(a ...interface{}) {
|
||||
fdump(c, os.Stdout, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(c, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a spew Formatter interface using
|
||||
// the ConfigState associated with s.
|
||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = newFormatter(c, arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
||||
|
||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||
//
|
||||
// Indent: " "
|
||||
// MaxDepth: 0
|
||||
// DisableMethods: false
|
||||
// DisablePointerMethods: false
|
||||
// ContinueOnMethod: false
|
||||
// SortKeys: false
|
||||
func NewDefaultConfig() *ConfigState {
|
||||
return &ConfigState{Indent: " "}
|
||||
}
|
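
A short usage sketch for the ConfigState methods added above (the field choices are illustrative; the calls themselves are the ones defined in this file):

package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

func main() {
	// NewDefaultConfig returns an independent ConfigState, so per-test
	// tweaks never touch the global spew.Config used by the top-level
	// functions.
	cs := spew.NewDefaultConfig()
	cs.DisablePointerAddresses = true // keeps output stable across runs
	cs.DisableCapacities = true       // ditto for slice/map capacities

	cs.Fdump(os.Stderr, map[string][]int{"xs": {1, 2, 3}})
	_ = cs.Sdump("same output, returned as a string")
}
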
211
vendor/github.com/davecgh/go-spew/spew/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,211 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging.
|
||||
|
||||
A quick overview of the additional features spew provides over the built-in
|
||||
printing facilities for Go data types is as follows:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output (only when using
|
||||
Dump style)
|
||||
|
||||
There are two different approaches spew allows for dumping Go data structures:
|
||||
|
||||
* Dump style which prints with newlines, customizable indentation,
|
||||
and additional debug information such as types and all pointer addresses
|
||||
used to indirect to the final value
|
||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||
similar to the default %v while providing the additional functionality
|
||||
outlined above and passing unsupported format verbs such as %x and %q
|
||||
along to fmt
|
||||
|
||||
Quick Start
|
||||
|
||||
This section demonstrates how to quickly get started with spew. See the
|
||||
sections below for further details on formatting and configuration options.
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||
%#+v (adds types and pointer addresses):
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available
|
||||
via the spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
|
||||
The following configuration options are available:
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* DisablePointerAddresses
|
||||
DisablePointerAddresses specifies whether to disable the printing of
|
||||
pointer addresses. This is useful when diffing data structures in tests.
|
||||
|
||||
* DisableCapacities
|
||||
DisableCapacities specifies whether to disable the printing of
|
||||
capacities for arrays, slices, maps and channels. This is useful when
|
||||
diffing data structures in tests.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are
|
||||
supported with other types sorted according to the
|
||||
reflect.Value.String() output which guarantees display
|
||||
stability. Natural map order is used by default.
|
||||
|
||||
* SpewKeys
|
||||
Specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only
|
||||
considered if SortKeys is true.
|
||||
|
||||
Dump Usage
|
||||
|
||||
Simply call spew.Dump with a list of variables you want to dump:
|
||||
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
|
||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||
io.Writer. For example, to dump to standard error:
|
||||
|
||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||
|
||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Sample Dump Output
|
||||
|
||||
See the Dump example for details on the setup of the types and variables being
|
||||
shown here.
|
||||
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
(string) (len=3) "one": (bool) true
|
||||
}
|
||||
}
|
||||
|
||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||
command as shown.
|
||||
([]uint8) (len=32 cap=32) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
|
||||
Custom Formatter
|
||||
|
||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||
so that it integrates cleanly with standard fmt package printing functions. The
|
||||
formatter is useful for inline printing of smaller data types similar to the
|
||||
standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Custom Formatter Usage
|
||||
|
||||
The simplest way to make use of the spew custom formatter is to call one of the
|
||||
convenience functions such as spew.Printf, spew.Println, or spew.Sprintf. The
|
||||
functions have syntax you are most likely already familiar with:
|
||||
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Println(myVar, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
See the Index for the full list of convenience functions.
|
||||
|
||||
Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
|
||||
See the Printf example for details on the setup of variables being shown
|
||||
here.
|
||||
|
||||
Errors
|
||||
|
||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||
detects them and handles them internally by printing the panic information
|
||||
inline with the output. Since spew is intended to provide deep pretty printing
|
||||
capabilities on structures, it intentionally does not return any errors.
|
||||
*/
|
||||
package spew
|
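
The two approaches described in doc.go, side by side, as a hedged sketch (the pair type is hypothetical):

package main

import "github.com/davecgh/go-spew/spew"

type pair struct {
	A int
	B *int
}

func main() {
	b := 7
	p := &pair{A: 1, B: &b}

	// Dump style: multi-line, indented, with types and pointer addresses.
	spew.Dump(p)

	// Formatter style: inline; %v is compact, %#+v adds types and pointer
	// addresses, and unsupported verbs like %q fall through to fmt.
	spew.Printf("compact: %v  full: %#+v\n", p, p)
}
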
509
vendor/github.com/davecgh/go-spew/spew/dump.go
generated
vendored
Normal file
|
@ -0,0 +1,509 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||
// convert cgo types to uint8 slices for hexdumping.
|
||||
uint8Type = reflect.TypeOf(uint8(0))
|
||||
|
||||
// cCharRE is a regular expression that matches a cgo char.
|
||||
// It is used to detect character arrays to hexdump them.
|
||||
cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
|
||||
|
||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||
// char. It is used to detect unsigned character arrays to hexdump
|
||||
// them.
|
||||
cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
|
||||
|
||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||
// It is used to detect uint8_t arrays to hexdump them.
|
||||
cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
|
||||
)
|
||||
|
||||
// dumpState contains information about the state of a dump operation.
|
||||
type dumpState struct {
|
||||
w io.Writer
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
ignoreNextIndent bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// indent performs indentation according to the depth level and cs.Indent
|
||||
// option.
|
||||
func (d *dumpState) indent() {
|
||||
if d.ignoreNextIndent {
|
||||
d.ignoreNextIndent = false
|
||||
return
|
||||
}
|
||||
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range d.pointers {
|
||||
if depth >= d.depth {
|
||||
delete(d.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
d.pointers[addr] = d.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type information.
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
d.w.Write([]byte(ve.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
|
||||
// Display pointer information.
|
||||
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
d.w.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(d.w, addr)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
d.w.Write(openParenBytes)
|
||||
switch {
|
||||
case nilFound:
|
||||
d.w.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
d.w.Write(circularBytes)
|
||||
|
||||
default:
|
||||
d.ignoreNextType = true
|
||||
d.dump(ve)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||
// Determine whether this type should be hex dumped or not. Also,
|
||||
// for types which should be hexdumped, try to use the underlying data
|
||||
// first, then fall back to trying to convert them to a uint8 slice.
|
||||
var buf []uint8
|
||||
doConvert := false
|
||||
doHexDump := false
|
||||
numEntries := v.Len()
|
||||
if numEntries > 0 {
|
||||
vt := v.Index(0).Type()
|
||||
vts := vt.String()
|
||||
switch {
|
||||
// C types that need to be converted.
|
||||
case cCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUnsignedCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUint8tCharRE.MatchString(vts):
|
||||
doConvert = true
|
||||
|
||||
// Try to use existing uint8 slices and fall back to converting
|
||||
// and copying if that fails.
|
||||
case vt.Kind() == reflect.Uint8:
|
||||
// We need an addressable interface to convert the type
|
||||
// to a byte slice. However, the reflect package won't
|
||||
// give us an interface on certain things like
|
||||
// unexported struct fields in order to enforce
|
||||
// visibility rules. We use unsafe, when available, to
|
||||
// bypass these restrictions since this package does not
|
||||
// mutate the values.
|
||||
vs := v
|
||||
if !vs.CanInterface() || !vs.CanAddr() {
|
||||
vs = unsafeReflectValue(vs)
|
||||
}
|
||||
if !UnsafeDisabled {
|
||||
vs = vs.Slice(0, numEntries)
|
||||
|
||||
// Use the existing uint8 slice if it can be
|
||||
// type asserted.
|
||||
iface := vs.Interface()
|
||||
if slice, ok := iface.([]uint8); ok {
|
||||
buf = slice
|
||||
doHexDump = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// The underlying data needs to be converted if it can't
|
||||
// be type asserted to a uint8 slice.
|
||||
doConvert = true
|
||||
}
|
||||
|
||||
// Copy and convert the underlying type if needed.
|
||||
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||
// Convert and copy each element into a uint8 byte
|
||||
// slice.
|
||||
buf = make([]uint8, numEntries)
|
||||
for i := 0; i < numEntries; i++ {
|
||||
vv := v.Index(i)
|
||||
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||
}
|
||||
doHexDump = true
|
||||
}
|
||||
}
|
||||
|
||||
// Hexdump the entire slice as needed.
|
||||
if doHexDump {
|
||||
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||
str := indent + hex.Dump(buf)
|
||||
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||
str = strings.TrimRight(str, d.cs.Indent)
|
||||
d.w.Write([]byte(str))
|
||||
return
|
||||
}
|
||||
|
||||
// Recursively call dump for each item.
|
||||
for i := 0; i < numEntries; i++ {
|
||||
d.dump(d.unpackValue(v.Index(i)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||
// value to figure out what kind of object we are dealing with and formats it
|
||||
// appropriately. It is a recursive function, however circular data structures
|
||||
// are detected and handled properly.
|
||||
func (d *dumpState) dump(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
d.w.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
d.indent()
|
||||
d.dumpPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !d.ignoreNextType {
|
||||
d.indent()
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write([]byte(v.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.ignoreNextType = false
|
||||
|
||||
// Display length and capacity if the built-in len and cap functions
|
||||
// work with the value's kind and the len/cap itself is non-zero.
|
||||
valueLen, valueCap := 0, 0
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||
valueLen, valueCap = v.Len(), v.Cap()
|
||||
case reflect.Map, reflect.String:
|
||||
valueLen = v.Len()
|
||||
}
|
||||
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
if valueLen != 0 {
|
||||
d.w.Write(lenEqualsBytes)
|
||||
printInt(d.w, int64(valueLen), 10)
|
||||
}
|
||||
if !d.cs.DisableCapacities && valueCap != 0 {
|
||||
if valueLen != 0 {
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.w.Write(capEqualsBytes)
|
||||
printInt(d.w, int64(valueCap), 10)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||
// is enabled
|
||||
if !d.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(d.w, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(d.w, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(d.w, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(d.w, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(d.w, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(d.w, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(d.w, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
d.dumpSlice(v)
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.String:
|
||||
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
keys := v.MapKeys()
|
||||
if d.cs.SortKeys {
|
||||
sortValues(keys, d.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
d.dump(d.unpackValue(key))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
numFields := v.NumField()
|
||||
for i := 0; i < numFields; i++ {
|
||||
d.indent()
|
||||
vtf := vt.Field(i)
|
||||
d.w.Write([]byte(vtf.Name))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.Field(i)))
|
||||
if i < (numFields - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(d.w, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(d.w, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it in case any new
|
||||
// types are added.
|
||||
default:
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(d.w, "%v", v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fdump is a helper function to consolidate the logic from the various public
|
||||
// methods which take varying writers and config states.
|
||||
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||
for _, arg := range a {
|
||||
if arg == nil {
|
||||
w.Write(interfaceBytes)
|
||||
w.Write(spaceBytes)
|
||||
w.Write(nilAngleBytes)
|
||||
w.Write(newlineBytes)
|
||||
continue
|
||||
}
|
||||
|
||||
d := dumpState{w: w, cs: cs}
|
||||
d.pointers = make(map[uintptr]int)
|
||||
d.dump(reflect.ValueOf(arg))
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(&Config, w, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(&Config, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by an exported package global,
|
||||
spew.Config. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func Dump(a ...interface{}) {
|
||||
fdump(&Config, os.Stdout, a...)
|
||||
}
|
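
One behavior worth calling out from dumpSlice above: byte-like slices take the hexdump path. A minimal sketch (the input string is arbitrary):

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// []uint8 / []byte values are rendered hexdump -C style by dumpSlice:
	// offsets, hex bytes, and an ASCII column, indented to the current depth.
	data := []byte("hello, spew")
	spew.Dump(data)
}
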
419
vendor/github.com/davecgh/go-spew/spew/format.go
generated
vendored
Normal file
|
@ -0,0 +1,419 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||
const supportedFlags = "0-+# "
|
||||
|
||||
// formatState implements the fmt.Formatter interface and contains information
|
||||
// about the state of a formatting operation. The NewFormatter function can
|
||||
// be used to get a new Formatter which can be used directly as arguments
|
||||
// in standard fmt package printing calls.
|
||||
type formatState struct {
|
||||
value interface{}
|
||||
fs fmt.State
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// buildDefaultFormat recreates the original format string without precision
|
||||
// and width information to pass in to fmt.Sprintf in the case of an
|
||||
// unrecognized type. Unless new types are added to the language, this
|
||||
// function won't ever be called.
|
||||
func (f *formatState) buildDefaultFormat() (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteRune('v')
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// constructOrigFormat recreates the original format string including precision
|
||||
// and width information to pass along to the standard fmt package. This allows
|
||||
// automatic deferral of all format strings this package doesn't support.
|
||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
if width, ok := f.fs.Width(); ok {
|
||||
buf.WriteString(strconv.Itoa(width))
|
||||
}
|
||||
|
||||
if precision, ok := f.fs.Precision(); ok {
|
||||
buf.Write(precisionBytes)
|
||||
buf.WriteString(strconv.Itoa(precision))
|
||||
}
|
||||
|
||||
buf.WriteRune(verb)
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||
// ensures that types for values which have been unpacked from an interface
|
||||
// are displayed when the show types flag is also set.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface {
|
||||
f.ignoreNextType = false
|
||||
if !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (f *formatState) formatPtr(v reflect.Value) {
|
||||
// Display nil if top level pointer is nil.
|
||||
showTypes := f.fs.Flag('#')
|
||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range f.pointers {
|
||||
if depth >= f.depth {
|
||||
delete(f.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to possibly show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
f.pointers[addr] = f.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type or indirection level depending on flags.
|
||||
if showTypes && !f.ignoreNextType {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
f.fs.Write([]byte(ve.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
} else {
|
||||
if nilFound || cycleFound {
|
||||
indirects += strings.Count(ve.Type().String(), "*")
|
||||
}
|
||||
f.fs.Write(openAngleBytes)
|
||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||
f.fs.Write(closeAngleBytes)
|
||||
}
|
||||
|
||||
// Display pointer information depending on flags.
|
||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||
f.fs.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
f.fs.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(f.fs, addr)
|
||||
}
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound:
|
||||
f.fs.Write(circularShortBytes)
|
||||
|
||||
default:
|
||||
f.ignoreNextType = true
|
||||
f.format(ve)
|
||||
}
|
||||
}
|
||||
|
||||
// format is the main workhorse for providing the Formatter interface. It
|
||||
// uses the passed reflect value to figure out what kind of object we are
|
||||
// dealing with and formats it appropriately. It is a recursive function,
|
||||
// however circular data structures are detected and handled properly.
|
||||
func (f *formatState) format(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
f.fs.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
f.formatPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !f.ignoreNextType && f.fs.Flag('#') {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write([]byte(v.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
f.ignoreNextType = false
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods
|
||||
// flag is enabled.
|
||||
if !f.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(f.cs, f.fs, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(f.fs, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(f.fs, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(f.fs, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(f.fs, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(f.fs, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(f.fs, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(f.fs, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
f.fs.Write(openBracketBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
for i := 0; i < numEntries; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.Index(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBracketBytes)
|
||||
|
||||
case reflect.String:
|
||||
f.fs.Write([]byte(v.String()))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
f.fs.Write(openMapBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
keys := v.MapKeys()
|
||||
if f.cs.SortKeys {
|
||||
sortValues(keys, f.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(key))
|
||||
f.fs.Write(colonBytes)
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.MapIndex(key)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeMapBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
numFields := v.NumField()
|
||||
f.fs.Write(openBraceBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
for i := 0; i < numFields; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
vtf := vt.Field(i)
|
||||
if f.fs.Flag('+') || f.fs.Flag('#') {
|
||||
f.fs.Write([]byte(vtf.Name))
|
||||
f.fs.Write(colonBytes)
|
||||
}
|
||||
f.format(f.unpackValue(v.Field(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(f.fs, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(f.fs, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it if any get added.
|
||||
default:
|
||||
format := f.buildDefaultFormat()
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(f.fs, format, v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(f.fs, format, v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||
// details.
|
||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||
f.fs = fs
|
||||
|
||||
// Use standard formatting for verbs that are not v.
|
||||
if verb != 'v' {
|
||||
format := f.constructOrigFormat(verb)
|
||||
fmt.Fprintf(fs, format, f.value)
|
||||
return
|
||||
}
|
||||
|
||||
if f.value == nil {
|
||||
if fs.Flag('#') {
|
||||
fs.Write(interfaceBytes)
|
||||
}
|
||||
fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
f.format(reflect.ValueOf(f.value))
|
||||
}
|
||||
|
||||
// newFormatter is a helper function to consolidate the logic from the various
|
||||
// public methods which take varying config states.
|
||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||
fs := &formatState{value: v, cs: cs}
|
||||
fs.pointers = make(map[uintptr]int)
|
||||
return fs
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
Printf, Println, or Fprintf.
|
||||
*/
|
||||
func NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(&Config, v)
|
||||
}
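The doc comment above describes which verb combinations the formatter responds to. As a hedged illustration (the `server` struct and its values are made up for this sketch, not part of the library), the formatter plugs into the standard fmt functions like this:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// server is a hypothetical type used only to demonstrate the verbs.
type server struct {
	Name string
	Port int
}

func main() {
	s := &server{Name: "relay", Port: 8080}

	// %v is the most compact form; %+v adds pointer addresses,
	// %#v adds types, and %#+v adds both.
	fmt.Printf("%v\n", spew.NewFormatter(s))
	fmt.Printf("%#+v\n", spew.NewFormatter(s))

	// The convenience wrappers defined in spew.go below wrap every
	// argument in NewFormatter automatically.
	spew.Printf("%+v\n", s)
}
```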
|
148
vendor/github.com/davecgh/go-spew/spew/spew.go
generated
vendored
Normal file
|
@ -0,0 +1,148 @@
|
|||
/*
|
||||
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the formatted string as a value that satisfies error. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a default spew Formatter interface.
|
||||
func convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = NewFormatter(arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
14
vendor/github.com/fiatjaf/khatru/nip86.go
generated
vendored
|
@ -65,16 +65,22 @@ func (rl *Relay) HandleNIP86(w http.ResponseWriter, r *http.Request) {
|
|||
resp.Error = "missing auth"
|
||||
goto respond
|
||||
}
|
||||
if evtj, err := base64.StdEncoding.DecodeString(spl[1]); err != nil {
|
||||
|
||||
evtj, err := base64.StdEncoding.DecodeString(spl[1])
|
||||
if err != nil {
|
||||
resp.Error = "invalid base64 auth"
|
||||
goto respond
|
||||
} else if err := json.Unmarshal(evtj, &evt); err != nil {
|
||||
}
|
||||
if err := json.Unmarshal(evtj, &evt); err != nil {
|
||||
resp.Error = "invalid auth event json"
|
||||
goto respond
|
||||
} else if ok, _ := evt.CheckSignature(); !ok {
|
||||
}
|
||||
if ok, _ := evt.CheckSignature(); !ok {
|
||||
resp.Error = "invalid auth event"
|
||||
goto respond
|
||||
} else if uTag := evt.Tags.GetFirst([]string{"u", ""}); uTag == nil || getServiceBaseURL(r) != (*uTag)[1] {
|
||||
}
|
||||
|
||||
if uTag := evt.Tags.GetFirst([]string{"u", ""}); uTag == nil || rl.ServiceURL != (*uTag)[1] {
|
||||
resp.Error = "invalid 'u' tag"
|
||||
goto respond
|
||||
} else if pht := evt.Tags.GetFirst([]string{"payload", hex.EncodeToString(payloadHash[:])}); pht == nil {
|
||||
|
|
20
vendor/github.com/gorilla/schema/.editorconfig
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
; https://editorconfig.org/
|
||||
|
||||
root = true
|
||||
|
||||
[*]
|
||||
insert_final_newline = true
|
||||
charset = utf-8
|
||||
trim_trailing_whitespace = true
|
||||
indent_style = space
|
||||
indent_size = 2
|
||||
|
||||
[{Makefile,go.mod,go.sum,*.go,.gitmodules}]
|
||||
indent_style = tab
|
||||
indent_size = 4
|
||||
|
||||
[*.md]
|
||||
indent_size = 4
|
||||
trim_trailing_whitespace = false
|
||||
|
||||
eclint_indent_style = unset
|
1
vendor/github.com/gorilla/schema/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
coverage.coverprofile
|
27
vendor/github.com/gorilla/schema/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2023 The Gorilla Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
34
vendor/github.com/gorilla/schema/Makefile
generated
vendored
Normal file
|
@ -0,0 +1,34 @@
|
|||
GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '')
|
||||
GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest
|
||||
|
||||
GO_SEC=$(shell which gosec 2> /dev/null || echo '')
|
||||
GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest
|
||||
|
||||
GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '')
|
||||
GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest
|
||||
|
||||
.PHONY: golangci-lint
|
||||
golangci-lint:
|
||||
$(if $(GO_LINT), ,go install $(GO_LINT_URI))
|
||||
@echo "##### Running golangci-lint"
|
||||
golangci-lint run -v
|
||||
|
||||
.PHONY: gosec
|
||||
gosec:
|
||||
$(if $(GO_SEC), ,go install $(GO_SEC_URI))
|
||||
@echo "##### Running gosec"
|
||||
gosec ./...
|
||||
|
||||
.PHONY: govulncheck
|
||||
govulncheck:
|
||||
$(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI))
|
||||
@echo "##### Running govulncheck"
|
||||
govulncheck ./...
|
||||
|
||||
.PHONY: verify
|
||||
verify: golangci-lint gosec govulncheck
|
||||
|
||||
.PHONY: test
|
||||
test:
|
||||
@echo "##### Running tests"
|
||||
go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./...
|
119
vendor/github.com/gorilla/schema/README.md
generated
vendored
Normal file
|
@ -0,0 +1,119 @@
|
|||
# gorilla/schema
|
||||
|
||||
![testing](https://github.com/gorilla/schema/actions/workflows/test.yml/badge.svg)
|
||||
[![codecov](https://codecov.io/github/gorilla/schema/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/schema)
|
||||
[![godoc](https://godoc.org/github.com/gorilla/schema?status.svg)](https://godoc.org/github.com/gorilla/schema)
|
||||
[![sourcegraph](https://sourcegraph.com/github.com/gorilla/schema/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/schema?badge)
|
||||
|
||||
|
||||
![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5)
|
||||
|
||||
Package gorilla/schema converts structs to and from form values.
|
||||
|
||||
## Example
|
||||
|
||||
Here's a quick example: we parse POST form values and then decode them into a struct:
|
||||
|
||||
```go
|
||||
// Set a Decoder instance as a package global, because it caches
|
||||
// meta-data about structs, and an instance can be shared safely.
|
||||
var decoder = schema.NewDecoder()
|
||||
|
||||
type Person struct {
|
||||
Name string
|
||||
Phone string
|
||||
}
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
err := r.ParseForm()
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
|
||||
var person Person
|
||||
|
||||
// r.PostForm is a map of our POST form values
|
||||
err = decoder.Decode(&person, r.PostForm)
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
|
||||
// Do something with person.Name or person.Phone
|
||||
}
|
||||
```
|
||||
|
||||
Conversely, contents of a struct can be encoded into form values. Here's a variant of the previous example using the Encoder:
|
||||
|
||||
```go
|
||||
var encoder = schema.NewEncoder()
|
||||
|
||||
func MyHttpRequest() {
|
||||
person := Person{"Jane Doe", "555-5555"}
|
||||
form := url.Values{}
|
||||
|
||||
err := encoder.Encode(person, form)
|
||||
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
|
||||
// Use form values, for example, with an http client
|
||||
client := new(http.Client)
|
||||
res, err := client.PostForm("http://my-api.test", form)
if err != nil {
// Handle error
}
defer res.Body.Close()
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
To define custom names for fields, use a struct tag "schema". To not populate certain fields, use a dash for the name and it will be ignored:
|
||||
|
||||
```go
|
||||
type Person struct {
|
||||
Name string `schema:"name,required"` // custom name, must be supplied
|
||||
Phone string `schema:"phone"` // custom name
|
||||
Admin bool `schema:"-"` // this field is never set
|
||||
}
|
||||
```
|
||||
|
||||
The supported field types in the struct are:
|
||||
|
||||
* bool
|
||||
* float variants (float32, float64)
|
||||
* int variants (int, int8, int16, int32, int64)
|
||||
* string
|
||||
* uint variants (uint, uint8, uint16, uint32, uint64)
|
||||
* struct
|
||||
* a pointer to one of the above types
|
||||
* a slice or a pointer to a slice of one of the above types
|
||||
|
||||
Unsupported types are simply ignored; however, custom types can be registered to be converted.
|
||||
|
||||
## Setting Defaults
|
||||
|
||||
It is possible to set default values when encoding/decoding by using the `default` tag option. The value of `default` is applied when a field has a zero value, a pointer has a nil value, or a slice is empty.
|
||||
|
||||
```go
|
||||
type Person struct {
|
||||
Phone string `schema:"phone,default:+123456"` // custom name
|
||||
Age int `schema:"age,default:21"`
|
||||
Admin bool `schema:"admin,default:false"`
|
||||
Balance float64 `schema:"balance,default:10.0"`
|
||||
Friends []string `schema:"friends,default:john|bob"`
|
||||
}
|
||||
```
|
||||
|
||||
The `default` tag option is supported for the following types:
|
||||
|
||||
* bool
|
||||
* float variants (float32, float64)
|
||||
* int variants (int, int8, int16, int32, int64)
|
||||
* uint variants (uint, uint8, uint16, uint32, uint64)
|
||||
* string
|
||||
* a slice of the above types. As shown in the example above, `|` should be used to separate slice items.
|
||||
* a pointer to one of the above types (pointer to slice and slice of pointers are not supported).
|
||||
|
||||
> [!NOTE]
|
||||
> Because primitive types like int, float, bool, uint and their variants have their default (or zero) values set by Golang, it is not possible to distinguish them from a provided value when decoding/encoding form values. In this case, the value provided by the `default` option tag will always be applied. For example, let's assume that the value submitted in the form for `balance` is `0.0`; then the default of `10.0` will be applied, even though `0.0` is part of the form data for the `balance` field. In such cases, it is highly recommended to use pointers so that schema can distinguish between a form field with no provided value and a form value equal to the corresponding Go zero value for that type. If the type of the `Balance` field above is changed to `*float64`, then the zero value would be `nil`. In this case, if the form data value for `balance` is `0.0`, the default will not be applied.
|
||||
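A small, hedged illustration of the pointer-based approach the note recommends (the struct and field are invented for this example):

```go
type Payment struct {
	// With a plain float64, a submitted 0.0 is indistinguishable from "no
	// value", so the default of 10.0 would silently replace it. With
	// *float64 the zero value is nil, so a submitted 0.0 is kept and the
	// default is only applied when the field is absent from the form.
	Balance *float64 `schema:"balance,default:10.0"`
}
```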
|
||||
## License
|
||||
|
||||
BSD licensed. See the LICENSE file for details.
|
317
vendor/github.com/gorilla/schema/cache.go
generated
vendored
Normal file
|
@ -0,0 +1,317 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package schema
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var errInvalidPath = errors.New("schema: invalid path")
|
||||
|
||||
// newCache returns a new cache.
|
||||
func newCache() *cache {
|
||||
c := cache{
|
||||
m: make(map[reflect.Type]*structInfo),
|
||||
regconv: make(map[reflect.Type]Converter),
|
||||
tag: "schema",
|
||||
}
|
||||
return &c
|
||||
}
|
||||
|
||||
// cache caches meta-data about a struct.
|
||||
type cache struct {
|
||||
l sync.RWMutex
|
||||
m map[reflect.Type]*structInfo
|
||||
regconv map[reflect.Type]Converter
|
||||
tag string
|
||||
}
|
||||
|
||||
// registerConverter registers a converter function for a custom type.
|
||||
func (c *cache) registerConverter(value interface{}, converterFunc Converter) {
|
||||
c.regconv[reflect.TypeOf(value)] = converterFunc
|
||||
}
|
||||
|
||||
// parsePath parses a path in dotted notation verifying that it is a valid
|
||||
// path to a struct field.
|
||||
//
|
||||
// It returns "path parts" which contain indices to fields to be used by
|
||||
// reflect.Value.FieldByName(). Multiple parts are required for slices of
|
||||
// structs.
|
||||
func (c *cache) parsePath(p string, t reflect.Type) ([]pathPart, error) {
|
||||
var struc *structInfo
|
||||
var field *fieldInfo
|
||||
var index64 int64
|
||||
var err error
|
||||
parts := make([]pathPart, 0)
|
||||
path := make([]string, 0)
|
||||
keys := strings.Split(p, ".")
|
||||
for i := 0; i < len(keys); i++ {
|
||||
if t.Kind() != reflect.Struct {
|
||||
return nil, errInvalidPath
|
||||
}
|
||||
if struc = c.get(t); struc == nil {
|
||||
return nil, errInvalidPath
|
||||
}
|
||||
if field = struc.get(keys[i]); field == nil {
|
||||
return nil, errInvalidPath
|
||||
}
|
||||
// Valid field. Append index.
|
||||
path = append(path, field.name)
|
||||
if field.isSliceOfStructs && (!field.unmarshalerInfo.IsValid || (field.unmarshalerInfo.IsValid && field.unmarshalerInfo.IsSliceElement)) {
|
||||
// Parse a special case: slices of structs.
|
||||
// i+1 must be the slice index.
|
||||
//
|
||||
// Now that structs can implement the TextUnmarshaler interface,
|
||||
// we don't need to force the struct's fields to appear in the path.
|
||||
// So checking i+2 is not necessary anymore.
|
||||
i++
|
||||
if i+1 > len(keys) {
|
||||
return nil, errInvalidPath
|
||||
}
|
||||
if index64, err = strconv.ParseInt(keys[i], 10, 0); err != nil {
|
||||
return nil, errInvalidPath
|
||||
}
|
||||
parts = append(parts, pathPart{
|
||||
path: path,
|
||||
field: field,
|
||||
index: int(index64),
|
||||
})
|
||||
path = make([]string, 0)
|
||||
|
||||
// Get the next struct type, dropping ptrs.
|
||||
if field.typ.Kind() == reflect.Ptr {
|
||||
t = field.typ.Elem()
|
||||
} else {
|
||||
t = field.typ
|
||||
}
|
||||
if t.Kind() == reflect.Slice {
|
||||
t = t.Elem()
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
}
|
||||
} else if field.typ.Kind() == reflect.Ptr {
|
||||
t = field.typ.Elem()
|
||||
} else {
|
||||
t = field.typ
|
||||
}
|
||||
}
|
||||
// Add the remaining.
|
||||
parts = append(parts, pathPart{
|
||||
path: path,
|
||||
field: field,
|
||||
index: -1,
|
||||
})
|
||||
return parts, nil
|
||||
}
|
||||
|
||||
// get returns a cached structInfo, creating it if necessary.
|
||||
func (c *cache) get(t reflect.Type) *structInfo {
|
||||
c.l.RLock()
|
||||
info := c.m[t]
|
||||
c.l.RUnlock()
|
||||
if info == nil {
|
||||
info = c.create(t, "")
|
||||
c.l.Lock()
|
||||
c.m[t] = info
|
||||
c.l.Unlock()
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
// create creates a structInfo with meta-data about a struct.
|
||||
func (c *cache) create(t reflect.Type, parentAlias string) *structInfo {
|
||||
info := &structInfo{}
|
||||
var anonymousInfos []*structInfo
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
if f := c.createField(t.Field(i), parentAlias); f != nil {
|
||||
info.fields = append(info.fields, f)
|
||||
if ft := indirectType(f.typ); ft.Kind() == reflect.Struct && f.isAnonymous {
|
||||
anonymousInfos = append(anonymousInfos, c.create(ft, f.canonicalAlias))
|
||||
}
|
||||
}
|
||||
}
|
||||
for i, a := range anonymousInfos {
|
||||
others := []*structInfo{info}
|
||||
others = append(others, anonymousInfos[:i]...)
|
||||
others = append(others, anonymousInfos[i+1:]...)
|
||||
for _, f := range a.fields {
|
||||
if !containsAlias(others, f.alias) {
|
||||
info.fields = append(info.fields, f)
|
||||
}
|
||||
}
|
||||
}
|
||||
return info
|
||||
}
|
||||
|
||||
// createField creates a fieldInfo for the given field.
|
||||
func (c *cache) createField(field reflect.StructField, parentAlias string) *fieldInfo {
|
||||
alias, options := fieldAlias(field, c.tag)
|
||||
if alias == "-" {
|
||||
// Ignore this field.
|
||||
return nil
|
||||
}
|
||||
canonicalAlias := alias
|
||||
if parentAlias != "" {
|
||||
canonicalAlias = parentAlias + "." + alias
|
||||
}
|
||||
// Check if the type is supported and don't cache it if not.
|
||||
// First let's get the basic type.
|
||||
isSlice, isStruct := false, false
|
||||
ft := field.Type
|
||||
m := isTextUnmarshaler(reflect.Zero(ft))
|
||||
if ft.Kind() == reflect.Ptr {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
if isSlice = ft.Kind() == reflect.Slice; isSlice {
|
||||
ft = ft.Elem()
|
||||
if ft.Kind() == reflect.Ptr {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
}
|
||||
if ft.Kind() == reflect.Array {
|
||||
ft = ft.Elem()
|
||||
if ft.Kind() == reflect.Ptr {
|
||||
ft = ft.Elem()
|
||||
}
|
||||
}
|
||||
if isStruct = ft.Kind() == reflect.Struct; !isStruct {
|
||||
if c.converter(ft) == nil && builtinConverters[ft.Kind()] == nil {
|
||||
// Type is not supported.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return &fieldInfo{
|
||||
typ: field.Type,
|
||||
name: field.Name,
|
||||
alias: alias,
|
||||
canonicalAlias: canonicalAlias,
|
||||
unmarshalerInfo: m,
|
||||
isSliceOfStructs: isSlice && isStruct,
|
||||
isAnonymous: field.Anonymous,
|
||||
isRequired: options.Contains("required"),
|
||||
defaultValue: options.getDefaultOptionValue(),
|
||||
}
|
||||
}
|
||||
|
||||
// converter returns the converter for a type.
|
||||
func (c *cache) converter(t reflect.Type) Converter {
|
||||
return c.regconv[t]
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
type structInfo struct {
|
||||
fields []*fieldInfo
|
||||
}
|
||||
|
||||
func (i *structInfo) get(alias string) *fieldInfo {
|
||||
for _, field := range i.fields {
|
||||
if strings.EqualFold(field.alias, alias) {
|
||||
return field
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func containsAlias(infos []*structInfo, alias string) bool {
|
||||
for _, info := range infos {
|
||||
if info.get(alias) != nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type fieldInfo struct {
|
||||
typ reflect.Type
|
||||
// name is the field name in the struct.
|
||||
name string
|
||||
alias string
|
||||
// canonicalAlias is almost the same as the alias, but is prefixed with
|
||||
// an embedded struct field alias in dotted notation if this field is
|
||||
// promoted from the struct.
|
||||
// For instance, if the alias is "N" and this field is an embedded field
|
||||
// in a struct "X", canonicalAlias will be "X.N".
|
||||
canonicalAlias string
|
||||
// unmarshalerInfo contains information regarding the
|
||||
// encoding.TextUnmarshaler implementation of the field type.
|
||||
unmarshalerInfo unmarshaler
|
||||
// isSliceOfStructs indicates if the field type is a slice of structs.
|
||||
isSliceOfStructs bool
|
||||
// isAnonymous indicates whether the field is embedded in the struct.
|
||||
isAnonymous bool
|
||||
isRequired bool
|
||||
defaultValue string
|
||||
}
|
||||
|
||||
func (f *fieldInfo) paths(prefix string) []string {
|
||||
if f.alias == f.canonicalAlias {
|
||||
return []string{prefix + f.alias}
|
||||
}
|
||||
return []string{prefix + f.alias, prefix + f.canonicalAlias}
|
||||
}
|
||||
|
||||
type pathPart struct {
|
||||
field *fieldInfo
|
||||
path []string // path to the field: walks structs using field names.
|
||||
index int // struct index in slices of structs.
|
||||
}
|
||||
|
||||
// ----------------------------------------------------------------------------
|
||||
|
||||
func indirectType(typ reflect.Type) reflect.Type {
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
return typ.Elem()
|
||||
}
|
||||
return typ
|
||||
}
|
||||
|
||||
// fieldAlias parses a field tag to get a field alias.
|
||||
func fieldAlias(field reflect.StructField, tagName string) (alias string, options tagOptions) {
|
||||
if tag := field.Tag.Get(tagName); tag != "" {
|
||||
alias, options = parseTag(tag)
|
||||
}
|
||||
if alias == "" {
|
||||
alias = field.Name
|
||||
}
|
||||
return alias, options
|
||||
}
|
||||
|
||||
// tagOptions is the string following a comma in a struct field's tag, or
|
||||
// the empty string. It does not include the leading comma.
|
||||
type tagOptions []string
|
||||
|
||||
// parseTag splits a struct field's schema tag into its name and comma-separated
|
||||
// options.
|
||||
func parseTag(tag string) (string, tagOptions) {
|
||||
s := strings.Split(tag, ",")
|
||||
return s[0], s[1:]
|
||||
}
|
||||
|
||||
// Contains checks whether the tagOptions contains the specified option.
|
||||
func (o tagOptions) Contains(option string) bool {
|
||||
for _, s := range o {
|
||||
if s == option {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (o tagOptions) getDefaultOptionValue() string {
|
||||
for _, s := range o {
|
||||
if strings.HasPrefix(s, "default:") {
|
||||
return strings.Split(s, ":")[1]
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
222
vendor/github.com/gorilla/schema/converter.go
generated
vendored
Normal file
|
@ -0,0 +1,222 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package schema
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type Converter func(string) reflect.Value
|
||||
|
||||
var (
|
||||
invalidValue = reflect.Value{}
|
||||
boolType = reflect.Bool
|
||||
float32Type = reflect.Float32
|
||||
float64Type = reflect.Float64
|
||||
intType = reflect.Int
|
||||
int8Type = reflect.Int8
|
||||
int16Type = reflect.Int16
|
||||
int32Type = reflect.Int32
|
||||
int64Type = reflect.Int64
|
||||
stringType = reflect.String
|
||||
uintType = reflect.Uint
|
||||
uint8Type = reflect.Uint8
|
||||
uint16Type = reflect.Uint16
|
||||
uint32Type = reflect.Uint32
|
||||
uint64Type = reflect.Uint64
|
||||
)
|
||||
|
||||
// Default converters for basic types.
|
||||
var builtinConverters = map[reflect.Kind]Converter{
|
||||
boolType: convertBool,
|
||||
float32Type: convertFloat32,
|
||||
float64Type: convertFloat64,
|
||||
intType: convertInt,
|
||||
int8Type: convertInt8,
|
||||
int16Type: convertInt16,
|
||||
int32Type: convertInt32,
|
||||
int64Type: convertInt64,
|
||||
stringType: convertString,
|
||||
uintType: convertUint,
|
||||
uint8Type: convertUint8,
|
||||
uint16Type: convertUint16,
|
||||
uint32Type: convertUint32,
|
||||
uint64Type: convertUint64,
|
||||
}
|
||||
|
||||
func convertBool(value string) reflect.Value {
|
||||
if value == "on" {
|
||||
return reflect.ValueOf(true)
|
||||
} else if v, err := strconv.ParseBool(value); err == nil {
|
||||
return reflect.ValueOf(v)
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertFloat32(value string) reflect.Value {
|
||||
if v, err := strconv.ParseFloat(value, 32); err == nil {
|
||||
return reflect.ValueOf(float32(v))
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertFloat64(value string) reflect.Value {
|
||||
if v, err := strconv.ParseFloat(value, 64); err == nil {
|
||||
return reflect.ValueOf(v)
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertInt(value string) reflect.Value {
|
||||
if v, err := strconv.ParseInt(value, 10, 0); err == nil {
|
||||
return reflect.ValueOf(int(v))
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertInt8(value string) reflect.Value {
|
||||
if v, err := strconv.ParseInt(value, 10, 8); err == nil {
|
||||
return reflect.ValueOf(int8(v))
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertInt16(value string) reflect.Value {
|
||||
if v, err := strconv.ParseInt(value, 10, 16); err == nil {
|
||||
return reflect.ValueOf(int16(v))
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertInt32(value string) reflect.Value {
|
||||
if v, err := strconv.ParseInt(value, 10, 32); err == nil {
|
||||
return reflect.ValueOf(int32(v))
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertInt64(value string) reflect.Value {
|
||||
if v, err := strconv.ParseInt(value, 10, 64); err == nil {
|
||||
return reflect.ValueOf(v)
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertString(value string) reflect.Value {
|
||||
return reflect.ValueOf(value)
|
||||
}
|
||||
|
||||
func convertUint(value string) reflect.Value {
|
||||
if v, err := strconv.ParseUint(value, 10, 0); err == nil {
|
||||
return reflect.ValueOf(uint(v))
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertUint8(value string) reflect.Value {
|
||||
if v, err := strconv.ParseUint(value, 10, 8); err == nil {
|
||||
return reflect.ValueOf(uint8(v))
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertUint16(value string) reflect.Value {
|
||||
if v, err := strconv.ParseUint(value, 10, 16); err == nil {
|
||||
return reflect.ValueOf(uint16(v))
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertUint32(value string) reflect.Value {
|
||||
if v, err := strconv.ParseUint(value, 10, 32); err == nil {
|
||||
return reflect.ValueOf(uint32(v))
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertUint64(value string) reflect.Value {
|
||||
if v, err := strconv.ParseUint(value, 10, 64); err == nil {
|
||||
return reflect.ValueOf(v)
|
||||
}
|
||||
return invalidValue
|
||||
}
|
||||
|
||||
func convertPointer(k reflect.Kind, value string) reflect.Value {
|
||||
switch k {
|
||||
case boolType:
|
||||
if v := convertBool(value); v.IsValid() {
|
||||
converted := v.Bool()
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case float32Type:
|
||||
if v := convertFloat32(value); v.IsValid() {
|
||||
converted := float32(v.Float())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case float64Type:
|
||||
if v := convertFloat64(value); v.IsValid() {
|
||||
converted := float64(v.Float())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case intType:
|
||||
if v := convertInt(value); v.IsValid() {
|
||||
converted := int(v.Int())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case int8Type:
|
||||
if v := convertInt8(value); v.IsValid() {
|
||||
converted := int8(v.Int())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case int16Type:
|
||||
if v := convertInt16(value); v.IsValid() {
|
||||
converted := int16(v.Int())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case int32Type:
|
||||
if v := convertInt32(value); v.IsValid() {
|
||||
converted := int32(v.Int())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case int64Type:
|
||||
if v := convertInt64(value); v.IsValid() {
|
||||
converted := int64(v.Int())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case stringType:
|
||||
if v := convertString(value); v.IsValid() {
|
||||
converted := v.String()
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case uintType:
|
||||
if v := convertUint(value); v.IsValid() {
|
||||
converted := uint(v.Uint())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case uint8Type:
|
||||
if v := convertUint8(value); v.IsValid() {
|
||||
converted := uint8(v.Uint())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case uint16Type:
|
||||
if v := convertUint16(value); v.IsValid() {
|
||||
converted := uint16(v.Uint())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case uint32Type:
|
||||
if v := convertUint32(value); v.IsValid() {
|
||||
converted := uint32(v.Uint())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
case uint64Type:
|
||||
if v := convertUint64(value); v.IsValid() {
|
||||
converted := uint64(v.Uint())
|
||||
return reflect.ValueOf(&converted)
|
||||
}
|
||||
}
|
||||
|
||||
return invalidValue
|
||||
}
|
620
vendor/github.com/gorilla/schema/decoder.go
generated
vendored
Normal file
|
@ -0,0 +1,620 @@
|
|||
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package schema
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultMaxSize = 16000
|
||||
)
|
||||
|
||||
// NewDecoder returns a new Decoder.
|
||||
func NewDecoder() *Decoder {
|
||||
return &Decoder{cache: newCache(), maxSize: defaultMaxSize}
|
||||
}
|
||||
|
||||
// Decoder decodes values from a map[string][]string to a struct.
|
||||
type Decoder struct {
|
||||
cache *cache
|
||||
zeroEmpty bool
|
||||
ignoreUnknownKeys bool
|
||||
maxSize int
|
||||
}
|
||||
|
||||
// SetAliasTag changes the tag used to locate custom field aliases.
|
||||
// The default tag is "schema".
|
||||
func (d *Decoder) SetAliasTag(tag string) {
|
||||
d.cache.tag = tag
|
||||
}
|
||||
|
||||
// ZeroEmpty controls the behaviour when the decoder encounters empty values
|
||||
// in a map.
|
||||
// If z is true and a key in the map has the empty string as a value
|
||||
// then the corresponding struct field is set to the zero value.
|
||||
// If z is false then empty strings are ignored.
|
||||
//
|
||||
// The default value is false, that is empty values do not change
|
||||
// the value of the struct field.
|
||||
func (d *Decoder) ZeroEmpty(z bool) {
|
||||
d.zeroEmpty = z
|
||||
}
|
||||
|
||||
// IgnoreUnknownKeys controls the behaviour when the decoder encounters unknown
|
||||
// keys in the map.
|
||||
// If i is true and an unknown field is encountered, it is ignored. This is
|
||||
// similar to how unknown keys are handled by encoding/json.
|
||||
// If i is false then Decode will return an error. Note that any valid keys
|
||||
// will still be decoded in to the target struct.
|
||||
//
|
||||
// To preserve backwards compatibility, the default value is false.
|
||||
func (d *Decoder) IgnoreUnknownKeys(i bool) {
|
||||
d.ignoreUnknownKeys = i
|
||||
}
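As a hedged usage sketch of the two switches documented above (the `Profile` struct and the form keys are hypothetical, not part of the package):

```go
package main

import (
	"fmt"

	"github.com/gorilla/schema"
)

// Profile is a hypothetical target struct for this illustration.
type Profile struct {
	Nickname string `schema:"nickname"`
}

func main() {
	d := schema.NewDecoder()
	d.ZeroEmpty(true)         // an empty string in the source resets the field to its zero value
	d.IgnoreUnknownKeys(true) // unknown keys are skipped instead of producing an UnknownKeyError

	src := map[string][]string{
		"nickname": {""},     // clears Nickname because ZeroEmpty is enabled
		"theme":    {"dark"}, // ignored thanks to IgnoreUnknownKeys
	}

	var p Profile
	if err := d.Decode(&p, src); err != nil {
		fmt.Println(err)
	}
	fmt.Printf("%q\n", p.Nickname) // ""
}
```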
|
||||
|
||||
// MaxSize limits the size of slices for URL nested arrays or object arrays.
|
||||
// Choose MaxSize carefully; large values may create many zero-value slice elements.
|
||||
// Example: "items.100000=apple" would create a slice with 100,000 empty strings.
|
||||
func (d *Decoder) MaxSize(size int) {
|
||||
d.maxSize = size
|
||||
}
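A hedged sketch of the defensive limit described in the doc comment above; the `Cart`/`Item` types and the oversized key are invented for the example:

```go
package main

import (
	"fmt"

	"github.com/gorilla/schema"
)

// Item and Cart are hypothetical types used only for this illustration.
type Item struct {
	Name string `schema:"name"`
}

type Cart struct {
	Items []Item `schema:"items"`
}

func main() {
	d := schema.NewDecoder()
	d.MaxSize(100) // slice indexes above 100 now fail instead of allocating huge slices

	var c Cart
	err := d.Decode(&c, map[string][]string{"items.100000.name": {"apple"}})
	fmt.Println(err) // reports that index 100000 exceeds the configured maxSize
}
```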
|
||||
|
||||
// RegisterConverter registers a converter function for a custom type.
|
||||
func (d *Decoder) RegisterConverter(value interface{}, converterFunc Converter) {
|
||||
d.cache.registerConverter(value, converterFunc)
|
||||
}
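As a hedged sketch of registering a converter for a type the builtin converters do not cover (the `Booking` struct and the date layout are assumptions made for this example):

```go
package main

import (
	"fmt"
	"reflect"
	"time"

	"github.com/gorilla/schema"
)

// Booking is a hypothetical target struct for this illustration.
type Booking struct {
	Date time.Time `schema:"date"`
}

func main() {
	d := schema.NewDecoder()

	// A Converter receives the raw string and returns a reflect.Value of
	// the target type; returning the zero reflect.Value marks the
	// conversion as failed.
	d.RegisterConverter(time.Time{}, func(value string) reflect.Value {
		if t, err := time.Parse("2006-01-02", value); err == nil {
			return reflect.ValueOf(t)
		}
		return reflect.Value{}
	})

	var b Booking
	if err := d.Decode(&b, map[string][]string{"date": {"2024-05-01"}}); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(b.Date.Format("2006-01-02")) // 2024-05-01
}
```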
|
||||
|
||||
// Decode decodes a map[string][]string to a struct.
|
||||
//
|
||||
// The first parameter must be a pointer to a struct.
|
||||
//
|
||||
// The second parameter is a map, typically url.Values from an HTTP request.
|
||||
// Keys are "paths" in dotted notation to the struct fields and nested structs.
|
||||
//
|
||||
// See the package documentation for a full explanation of the mechanics.
|
||||
func (d *Decoder) Decode(dst interface{}, src map[string][]string) error {
|
||||
v := reflect.ValueOf(dst)
|
||||
if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
|
||||
return errors.New("schema: interface must be a pointer to struct")
|
||||
}
|
||||
v = v.Elem()
|
||||
t := v.Type()
|
||||
errors := MultiError{}
|
||||
for path, values := range src {
|
||||
if parts, err := d.cache.parsePath(path, t); err == nil {
|
||||
if err = d.decode(v, path, parts, values); err != nil {
|
||||
errors[path] = err
|
||||
}
|
||||
} else if !d.ignoreUnknownKeys {
|
||||
errors[path] = UnknownKeyError{Key: path}
|
||||
}
|
||||
}
|
||||
errors.merge(d.setDefaults(t, v))
|
||||
errors.merge(d.checkRequired(t, src))
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
}
|
||||
return nil
|
||||
}
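The doc comment above mentions that keys are dotted paths into nested structs; as a hedged sketch (the `Person`/`Phone` types are invented for this illustration), decoding a slice of structs looks like this:

```go
package main

import (
	"fmt"

	"github.com/gorilla/schema"
)

// Phone and Person are hypothetical types illustrating dotted-path keys.
type Phone struct {
	Label  string `schema:"label"`
	Number string `schema:"number"`
}

type Person struct {
	Name   string  `schema:"name"`
	Phones []Phone `schema:"phones"`
}

func main() {
	src := map[string][]string{
		"name":            {"Jane Doe"},
		"phones.0.label":  {"mobile"},
		"phones.0.number": {"555-0100"},
	}

	var p Person
	if err := schema.NewDecoder().Decode(&p, src); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%s has a %s phone: %s\n", p.Name, p.Phones[0].Label, p.Phones[0].Number)
}
```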
|
||||
|
||||
// setDefaults sets the default values when the `default` tag is specified,
|
||||
// default is supported on basic/primitive types and their pointers,
|
||||
// nested structs can also have default tags
|
||||
func (d *Decoder) setDefaults(t reflect.Type, v reflect.Value) MultiError {
|
||||
struc := d.cache.get(t)
|
||||
if struc == nil {
|
||||
// unexpected: cache.get never returns nil
|
||||
return MultiError{"default-" + t.Name(): errors.New("cache fail")}
|
||||
}
|
||||
|
||||
errs := MultiError{}
|
||||
|
||||
if v.Type().Kind() == reflect.Struct {
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
if field.Type().Kind() == reflect.Ptr && field.IsNil() && v.Type().Field(i).Anonymous {
|
||||
field.Set(reflect.New(field.Type().Elem()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, f := range struc.fields {
|
||||
vCurrent := v.FieldByName(f.name)
|
||||
|
||||
if vCurrent.Type().Kind() == reflect.Struct && f.defaultValue == "" {
|
||||
errs.merge(d.setDefaults(vCurrent.Type(), vCurrent))
|
||||
} else if isPointerToStruct(vCurrent) && f.defaultValue == "" {
|
||||
errs.merge(d.setDefaults(vCurrent.Elem().Type(), vCurrent.Elem()))
|
||||
}
|
||||
|
||||
if f.defaultValue != "" && f.isRequired {
|
||||
errs.merge(MultiError{"default-" + f.name: errors.New("required fields cannot have a default value")})
|
||||
} else if f.defaultValue != "" && vCurrent.IsZero() && !f.isRequired {
|
||||
if f.typ.Kind() == reflect.Struct {
|
||||
errs.merge(MultiError{"default-" + f.name: errors.New("default option is supported only on: bool, float variants, string, unit variants types or their corresponding pointers or slices")})
|
||||
} else if f.typ.Kind() == reflect.Slice {
|
||||
vals := strings.Split(f.defaultValue, "|")
|
||||
|
||||
// check if slice has one of the supported types for defaults
|
||||
if _, ok := builtinConverters[f.typ.Elem().Kind()]; !ok {
|
||||
errs.merge(MultiError{"default-" + f.name: errors.New("default option is supported only on: bool, float variants, string, unit variants types or their corresponding pointers or slices")})
|
||||
continue
|
||||
}
|
||||
|
||||
defaultSlice := reflect.MakeSlice(f.typ, 0, cap(vals))
|
||||
for _, val := range vals {
|
||||
// this check is to handle if the wrong value is provided
|
||||
convertedVal := builtinConverters[f.typ.Elem().Kind()](val)
|
||||
if !convertedVal.IsValid() {
|
||||
errs.merge(MultiError{"default-" + f.name: fmt.Errorf("failed setting default: %s is not compatible with field %s type", val, f.name)})
|
||||
break
|
||||
}
|
||||
defaultSlice = reflect.Append(defaultSlice, convertedVal)
|
||||
}
|
||||
vCurrent.Set(defaultSlice)
|
||||
} else if f.typ.Kind() == reflect.Ptr {
|
||||
t1 := f.typ.Elem()
|
||||
|
||||
if t1.Kind() == reflect.Struct || t1.Kind() == reflect.Slice {
|
||||
errs.merge(MultiError{"default-" + f.name: errors.New("default option is supported only on: bool, float variants, string, unit variants types or their corresponding pointers or slices")})
|
||||
}
|
||||
|
||||
// this check is to handle if the wrong value is provided
|
||||
if convertedVal := convertPointer(t1.Kind(), f.defaultValue); convertedVal.IsValid() {
|
||||
vCurrent.Set(convertedVal)
|
||||
}
|
||||
} else {
|
||||
// this check is to handle if the wrong value is provided
|
||||
if convertedVal := builtinConverters[f.typ.Kind()](f.defaultValue); convertedVal.IsValid() {
|
||||
vCurrent.Set(convertedVal)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return errs
|
||||
}
|
||||
|
||||
func isPointerToStruct(v reflect.Value) bool {
|
||||
return !v.IsZero() && v.Type().Kind() == reflect.Ptr && v.Elem().Type().Kind() == reflect.Struct
|
||||
}
|
||||
|
||||
// checkRequired checks whether required fields are empty
|
||||
//
|
||||
// check type t recursively if t has struct fields.
|
||||
//
|
||||
// src is the source map for decoding; we use it here to check whether those required fields are present in src.
|
||||
func (d *Decoder) checkRequired(t reflect.Type, src map[string][]string) MultiError {
|
||||
m, errs := d.findRequiredFields(t, "", "")
|
||||
for key, fields := range m {
|
||||
if isEmptyFields(fields, src) {
|
||||
errs[key] = EmptyFieldError{Key: key}
|
||||
}
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
// findRequiredFields recursively searches the struct type t for required fields.
|
||||
//
|
||||
// canonicalPrefix and searchPrefix are used to resolve full paths in dotted notation
|
||||
// for nested struct fields. canonicalPrefix is a complete path which never omits
|
||||
// any embedded struct fields. searchPrefix is a user-friendly path which may omit
|
||||
// some embedded struct fields to point to promoted fields.
|
||||
func (d *Decoder) findRequiredFields(t reflect.Type, canonicalPrefix, searchPrefix string) (map[string][]fieldWithPrefix, MultiError) {
|
||||
struc := d.cache.get(t)
|
||||
if struc == nil {
|
||||
// unexpected: cache.get never returns nil
|
||||
return nil, MultiError{canonicalPrefix + "*": errors.New("cache fail")}
|
||||
}
|
||||
|
||||
m := map[string][]fieldWithPrefix{}
|
||||
errs := MultiError{}
|
||||
for _, f := range struc.fields {
|
||||
if f.typ.Kind() == reflect.Struct {
|
||||
fcprefix := canonicalPrefix + f.canonicalAlias + "."
|
||||
for _, fspath := range f.paths(searchPrefix) {
|
||||
fm, ferrs := d.findRequiredFields(f.typ, fcprefix, fspath+".")
|
||||
for key, fields := range fm {
|
||||
m[key] = append(m[key], fields...)
|
||||
}
|
||||
errs.merge(ferrs)
|
||||
}
|
||||
}
|
||||
if f.isRequired {
|
||||
key := canonicalPrefix + f.canonicalAlias
|
||||
m[key] = append(m[key], fieldWithPrefix{
|
||||
fieldInfo: f,
|
||||
prefix: searchPrefix,
|
||||
})
|
||||
}
|
||||
}
|
||||
return m, errs
|
||||
}
|
||||
|
||||
type fieldWithPrefix struct {
|
||||
*fieldInfo
|
||||
prefix string
|
||||
}
|
||||
|
||||
// isEmptyFields returns true if all of the specified fields are empty.
|
||||
func isEmptyFields(fields []fieldWithPrefix, src map[string][]string) bool {
|
||||
for _, f := range fields {
|
||||
for _, path := range f.paths(f.prefix) {
|
||||
v, ok := src[path]
|
||||
if ok && !isEmpty(f.typ, v) {
|
||||
return false
|
||||
}
|
||||
for key := range src {
|
||||
if !isEmpty(f.typ, src[key]) && strings.HasPrefix(key, path) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// isEmpty returns true if value is empty for specific type
|
||||
func isEmpty(t reflect.Type, value []string) bool {
|
||||
if len(value) == 0 {
|
||||
return true
|
||||
}
|
||||
switch t.Kind() {
|
||||
case boolType, float32Type, float64Type, intType, int8Type, int32Type, int64Type, stringType, uint8Type, uint16Type, uint32Type, uint64Type:
|
||||
return len(value[0]) == 0
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// decode fills a struct field using a parsed path.
|
||||
func (d *Decoder) decode(v reflect.Value, path string, parts []pathPart, values []string) error {
|
||||
// Get the field walking the struct fields by index.
|
||||
for _, name := range parts[0].path {
|
||||
if v.Type().Kind() == reflect.Ptr {
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
// alloc embedded structs
|
||||
if v.Type().Kind() == reflect.Struct {
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Field(i)
|
||||
if field.Type().Kind() == reflect.Ptr && field.IsNil() && v.Type().Field(i).Anonymous {
|
||||
field.Set(reflect.New(field.Type().Elem()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
v = v.FieldByName(name)
|
||||
}
|
||||
// Don't even bother for unexported fields.
|
||||
if !v.CanSet() {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Dereference if needed.
|
||||
t := v.Type()
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(t))
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
|
||||
// Slice of structs. Let's go recursive.
|
||||
if len(parts) > 1 {
|
||||
idx := parts[0].index
|
||||
// a defensive check to avoid creating a large slice based on user input index
|
||||
if idx > d.maxSize {
|
||||
return fmt.Errorf("%v index %d is larger than the configured maxSize %d", v.Kind(), idx, d.maxSize)
|
||||
}
|
||||
if v.IsNil() || v.Len() < idx+1 {
|
||||
value := reflect.MakeSlice(t, idx+1, idx+1)
|
||||
if v.Len() < idx+1 {
|
||||
// Resize it.
|
||||
reflect.Copy(value, v)
|
||||
}
|
||||
v.Set(value)
|
||||
}
|
||||
return d.decode(v.Index(idx), path, parts[1:], values)
|
||||
}
|
||||
|
||||
// Get the converter early in case there is one for a slice type.
|
||||
conv := d.cache.converter(t)
|
||||
m := isTextUnmarshaler(v)
|
||||
if conv == nil && t.Kind() == reflect.Slice && m.IsSliceElement {
|
||||
var items []reflect.Value
|
||||
elemT := t.Elem()
|
||||
isPtrElem := elemT.Kind() == reflect.Ptr
|
||||
if isPtrElem {
|
||||
elemT = elemT.Elem()
|
||||
}
|
||||
|
||||
// Try to get a converter for the element type.
|
||||
conv := d.cache.converter(elemT)
|
||||
if conv == nil {
|
||||
conv = builtinConverters[elemT.Kind()]
|
||||
if conv == nil {
|
||||
// As we are not dealing with slice of structs here, we don't need to check if the type
|
||||
// implements TextUnmarshaler interface
|
||||
return fmt.Errorf("schema: converter not found for %v", elemT)
|
||||
}
|
||||
}
|
||||
|
||||
for key, value := range values {
|
||||
if value == "" {
|
||||
if d.zeroEmpty {
|
||||
items = append(items, reflect.Zero(elemT))
|
||||
}
|
||||
} else if m.IsValid {
|
||||
u := reflect.New(elemT)
|
||||
if m.IsSliceElementPtr {
|
||||
u = reflect.New(reflect.PtrTo(elemT).Elem())
|
||||
}
|
||||
if err := u.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(value)); err != nil {
|
||||
return ConversionError{
|
||||
Key: path,
|
||||
Type: t,
|
||||
Index: key,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
if m.IsSliceElementPtr {
|
||||
items = append(items, u.Elem().Addr())
|
||||
} else if u.Kind() == reflect.Ptr {
|
||||
items = append(items, u.Elem())
|
||||
} else {
|
||||
items = append(items, u)
|
||||
}
|
||||
} else if item := conv(value); item.IsValid() {
|
||||
if isPtrElem {
|
||||
ptr := reflect.New(elemT)
|
||||
ptr.Elem().Set(item)
|
||||
item = ptr
|
||||
}
|
||||
if item.Type() != elemT && !isPtrElem {
|
||||
item = item.Convert(elemT)
|
||||
}
|
||||
items = append(items, item)
|
||||
} else {
|
||||
if strings.Contains(value, ",") {
|
||||
values := strings.Split(value, ",")
|
||||
for _, value := range values {
|
||||
if value == "" {
|
||||
if d.zeroEmpty {
|
||||
items = append(items, reflect.Zero(elemT))
|
||||
}
|
||||
} else if item := conv(value); item.IsValid() {
|
||||
if isPtrElem {
|
||||
ptr := reflect.New(elemT)
|
||||
ptr.Elem().Set(item)
|
||||
item = ptr
|
||||
}
|
||||
if item.Type() != elemT && !isPtrElem {
|
||||
item = item.Convert(elemT)
|
||||
}
|
||||
items = append(items, item)
|
||||
} else {
|
||||
return ConversionError{
|
||||
Key: path,
|
||||
Type: elemT,
|
||||
Index: key,
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return ConversionError{
|
||||
Key: path,
|
||||
Type: elemT,
|
||||
Index: key,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
value := reflect.Append(reflect.MakeSlice(t, 0, 0), items...)
|
||||
v.Set(value)
|
||||
} else {
|
||||
val := ""
|
||||
// Use the last value provided if any values were provided
|
||||
if len(values) > 0 {
|
||||
val = values[len(values)-1]
|
||||
}
|
||||
|
||||
if conv != nil {
|
||||
if value := conv(val); value.IsValid() {
|
||||
v.Set(value.Convert(t))
|
||||
} else {
|
||||
return ConversionError{
|
||||
Key: path,
|
||||
Type: t,
|
||||
Index: -1,
|
||||
}
|
||||
}
|
||||
} else if m.IsValid {
|
||||
if m.IsPtr {
|
||||
u := reflect.New(v.Type())
|
||||
if err := u.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(val)); err != nil {
|
||||
return ConversionError{
|
||||
Key: path,
|
||||
Type: t,
|
||||
Index: -1,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
v.Set(reflect.Indirect(u))
|
||||
} else {
|
||||
// If the value implements the encoding.TextUnmarshaler interface
|
||||
// apply UnmarshalText as the converter
|
||||
if err := m.Unmarshaler.UnmarshalText([]byte(val)); err != nil {
|
||||
return ConversionError{
|
||||
Key: path,
|
||||
Type: t,
|
||||
Index: -1,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if val == "" {
|
||||
if d.zeroEmpty {
|
||||
v.Set(reflect.Zero(t))
|
||||
}
|
||||
} else if conv := builtinConverters[t.Kind()]; conv != nil {
|
||||
if value := conv(val); value.IsValid() {
|
||||
v.Set(value.Convert(t))
|
||||
} else {
|
||||
return ConversionError{
|
||||
Key: path,
|
||||
Type: t,
|
||||
Index: -1,
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("schema: converter not found for %v", t)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func isTextUnmarshaler(v reflect.Value) unmarshaler {
|
||||
// Create a new unmarshaler instance
|
||||
m := unmarshaler{}
|
||||
if m.Unmarshaler, m.IsValid = v.Interface().(encoding.TextUnmarshaler); m.IsValid {
|
||||
return m
|
||||
}
|
||||
// As the UnmarshalText function should be applied to the pointer of the
|
||||
// type, we check that type to see if it implements the necessary
|
||||
// method.
|
||||
if m.Unmarshaler, m.IsValid = reflect.New(v.Type()).Interface().(encoding.TextUnmarshaler); m.IsValid {
|
||||
m.IsPtr = true
|
||||
return m
|
||||
}
|
||||
|
||||
// if v is []T or *[]T create new T
|
||||
t := v.Type()
|
||||
if t.Kind() == reflect.Ptr {
|
||||
t = t.Elem()
|
||||
}
|
||||
if t.Kind() == reflect.Slice {
|
||||
// Check if the slice implements encoding.TextUnmarshaler
|
||||
if m.Unmarshaler, m.IsValid = v.Interface().(encoding.TextUnmarshaler); m.IsValid {
|
||||
return m
|
||||
}
|
||||
// If t is a pointer slice, check if its elements implement
|
||||
// encoding.TextUnmarshaler
|
||||
m.IsSliceElement = true
|
||||
if t = t.Elem(); t.Kind() == reflect.Ptr {
|
||||
t = reflect.PtrTo(t.Elem())
|
||||
v = reflect.Zero(t)
|
||||
m.IsSliceElementPtr = true
|
||||
m.Unmarshaler, m.IsValid = v.Interface().(encoding.TextUnmarshaler)
|
||||
return m
|
||||
}
|
||||
}
|
||||
|
||||
v = reflect.New(t)
|
||||
m.Unmarshaler, m.IsValid = v.Interface().(encoding.TextUnmarshaler)
|
||||
return m
|
||||
}
|
||||
|
||||
// TextUnmarshaler helpers ----------------------------------------------------
|
||||
// unmarshaler contains information about a TextUnmarshaler type
|
||||
type unmarshaler struct {
|
||||
Unmarshaler encoding.TextUnmarshaler
|
||||
// IsValid indicates whether the resolved type indicated by the other
|
||||
// flags implements the encoding.TextUnmarshaler interface.
|
||||
IsValid bool
|
||||
// IsPtr indicates that the resolved type is the pointer of the original
|
||||
// type.
|
||||
IsPtr bool
|
||||
// IsSliceElement indicates that the resolved type is a slice element of
|
||||
// the original type.
|
||||
IsSliceElement bool
|
||||
// IsSliceElementPtr indicates that the resolved type is a pointer to a
|
||||
// slice element of the original type.
|
||||
IsSliceElementPtr bool
|
||||
}
|
||||
|
||||
// Errors ---------------------------------------------------------------------
|
||||
|
||||
// ConversionError stores information about a failed conversion.
|
||||
type ConversionError struct {
|
||||
Key string // key from the source map.
|
||||
Type reflect.Type // expected type of elem
|
||||
Index int // index for multi-value fields; -1 for single-value fields.
|
||||
Err error // low-level error (when it exists)
|
||||
}
|
||||
|
||||
func (e ConversionError) Error() string {
|
||||
var output string
|
||||
|
||||
if e.Index < 0 {
|
||||
output = fmt.Sprintf("schema: error converting value for %q", e.Key)
|
||||
} else {
|
||||
output = fmt.Sprintf("schema: error converting value for index %d of %q",
|
||||
e.Index, e.Key)
|
||||
}
|
||||
|
||||
if e.Err != nil {
|
||||
output = fmt.Sprintf("%s. Details: %s", output, e.Err)
|
||||
}
|
||||
|
||||
return output
|
||||
}
|
||||
|
||||
// UnknownKeyError stores information about an unknown key in the source map.
|
||||
type UnknownKeyError struct {
|
||||
Key string // key from the source map.
|
||||
}
|
||||
|
||||
func (e UnknownKeyError) Error() string {
|
||||
return fmt.Sprintf("schema: invalid path %q", e.Key)
|
||||
}
|
||||
|
||||
// EmptyFieldError stores information about an empty required field.
|
||||
type EmptyFieldError struct {
|
||||
Key string // required key in the source map.
|
||||
}
|
||||
|
||||
func (e EmptyFieldError) Error() string {
|
||||
return fmt.Sprintf("%v is empty", e.Key)
|
||||
}
|
||||
|
||||
// MultiError stores multiple decoding errors.
|
||||
//
|
||||
// Borrowed from the App Engine SDK.
|
||||
type MultiError map[string]error
|
||||
|
||||
func (e MultiError) Error() string {
|
||||
s := ""
|
||||
for _, err := range e {
|
||||
s = err.Error()
|
||||
break
|
||||
}
|
||||
switch len(e) {
|
||||
case 0:
|
||||
return "(0 errors)"
|
||||
case 1:
|
||||
return s
|
||||
case 2:
|
||||
return s + " (and 1 other error)"
|
||||
}
|
||||
return fmt.Sprintf("%s (and %d other errors)", s, len(e)-1)
|
||||
}
|
||||
|
||||
func (e MultiError) merge(errors MultiError) {
|
||||
for key, err := range errors {
|
||||
if e[key] == nil {
|
||||
e[key] = err
|
||||
}
|
||||
}
|
||||
}
|
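For context on how these error types surface to callers, here is a small hypothetical sketch (the Person struct and the form values are invented; it assumes Decode reports per-field failures as a schema.MultiError, matching the types defined above):

package main

import (
	"fmt"

	"github.com/gorilla/schema"
)

type Person struct {
	Name string `schema:"name"`
	Age  int    `schema:"age"`
}

func main() {
	values := map[string][]string{
		"name": {"John"},
		"age":  {"not-a-number"}, // triggers a ConversionError for this field
	}

	var p Person
	if err := schema.NewDecoder().Decode(&p, values); err != nil {
		// A MultiError maps field paths to individual errors such as
		// ConversionError or UnknownKeyError.
		if multi, ok := err.(schema.MultiError); ok {
			for key, fieldErr := range multi {
				fmt.Printf("%s: %v\n", key, fieldErr)
			}
			return
		}
		fmt.Println(err)
	}
}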
148 vendor/github.com/gorilla/schema/doc.go generated vendored Normal file
@@ -0,0 +1,148 @@
// Copyright 2012 The Gorilla Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
Package gorilla/schema fills a struct with form values.
|
||||
|
||||
The basic usage is really simple. Given this struct:
|
||||
|
||||
type Person struct {
|
||||
Name string
|
||||
Phone string
|
||||
}
|
||||
|
||||
...we can fill it passing a map to the Decode() function:
|
||||
|
||||
values := map[string][]string{
|
||||
"Name": {"John"},
|
||||
"Phone": {"999-999-999"},
|
||||
}
|
||||
person := new(Person)
|
||||
decoder := schema.NewDecoder()
|
||||
decoder.Decode(person, values)
|
||||
|
||||
This is just a simple example and it doesn't make a lot of sense to create
|
||||
the map manually. Typically it will come from a http.Request object and
|
||||
will be of type url.Values, http.Request.Form, or http.Request.MultipartForm:
|
||||
|
||||
func MyHandler(w http.ResponseWriter, r *http.Request) {
|
||||
err := r.ParseForm()
|
||||
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
|
||||
decoder := schema.NewDecoder()
|
||||
// r.PostForm is a map of our POST form values
|
||||
err := decoder.Decode(person, r.PostForm)
|
||||
|
||||
if err != nil {
|
||||
// Handle error
|
||||
}
|
||||
|
||||
// Do something with person.Name or person.Phone
|
||||
}
|
||||
|
||||
Note: it is a good idea to set a Decoder instance as a package global,
|
||||
because it caches meta-data about structs, and an instance can be shared safely:
|
||||
|
||||
var decoder = schema.NewDecoder()
|
||||
|
||||
To define custom names for fields, use a struct tag "schema". To not populate
|
||||
certain fields, use a dash for the name and it will be ignored:
|
||||
|
||||
type Person struct {
|
||||
Name string `schema:"name"` // custom name
|
||||
Phone string `schema:"phone"` // custom name
|
||||
Admin bool `schema:"-"` // this field is never set
|
||||
}
|
||||
|
||||
The supported field types in the destination struct are:
|
||||
|
||||
* bool
|
||||
* float variants (float32, float64)
|
||||
* int variants (int, int8, int16, int32, int64)
|
||||
* string
|
||||
* uint variants (uint, uint8, uint16, uint32, uint64)
|
||||
* struct
|
||||
* a pointer to one of the above types
|
||||
* a slice or a pointer to a slice of one of the above types
|
||||
|
||||
Non-supported types are simply ignored, however custom types can be registered
|
||||
to be converted.
|
||||
|
||||
To fill nested structs, keys must use a dotted notation as the "path" for the
|
||||
field. So for example, to fill the struct Person below:
|
||||
|
||||
type Phone struct {
|
||||
Label string
|
||||
Number string
|
||||
}
|
||||
|
||||
type Person struct {
|
||||
Name string
|
||||
Phone Phone
|
||||
}
|
||||
|
||||
...the source map must have the keys "Name", "Phone.Label" and "Phone.Number".
|
||||
This means that an HTML form to fill a Person struct must look like this:
|
||||
|
||||
<form>
|
||||
<input type="text" name="Name">
|
||||
<input type="text" name="Phone.Label">
|
||||
<input type="text" name="Phone.Number">
|
||||
</form>
|
||||
|
||||
Single values are filled using the first value for a key from the source map.
|
||||
Slices are filled using all values for a key from the source map. So to fill
|
||||
a Person with multiple Phone values, like:
|
||||
|
||||
type Person struct {
|
||||
Name string
|
||||
Phones []Phone
|
||||
}
|
||||
|
||||
...an HTML form that accepts three Phone values would look like this:
|
||||
|
||||
<form>
|
||||
<input type="text" name="Name">
|
||||
<input type="text" name="Phones.0.Label">
|
||||
<input type="text" name="Phones.0.Number">
|
||||
<input type="text" name="Phones.1.Label">
|
||||
<input type="text" name="Phones.1.Number">
|
||||
<input type="text" name="Phones.2.Label">
|
||||
<input type="text" name="Phones.2.Number">
|
||||
</form>
|
||||
|
||||
Notice that only for slices of structs the slice index is required.
|
||||
This is needed for disambiguation: if the nested struct also had a slice
|
||||
field, we could not translate multiple values to it if we did not use an
|
||||
index for the parent struct.
|
||||
|
||||
There's also the possibility to create a custom type that implements the
|
||||
TextUnmarshaler interface, and in this case there's no need to register
|
||||
a converter, like:
|
||||
|
||||
type Person struct {
|
||||
Emails []Email
|
||||
}
|
||||
|
||||
type Email struct {
|
||||
*mail.Address
|
||||
}
|
||||
|
||||
func (e *Email) UnmarshalText(text []byte) (err error) {
|
||||
e.Address, err = mail.ParseAddress(string(text))
|
||||
return
|
||||
}
|
||||
|
||||
...an HTML form that accepts three Email values would look like this:
|
||||
|
||||
<form>
|
||||
<input type="email" name="Emails.0">
|
||||
<input type="email" name="Emails.1">
|
||||
<input type="email" name="Emails.2">
|
||||
</form>
|
||||
*/
|
||||
package schema
|
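As a quick, self-contained illustration of the dotted-path and slice-index conventions described above, a minimal sketch (struct shapes and values are illustrative only):

package main

import (
	"fmt"

	"github.com/gorilla/schema"
)

type Phone struct {
	Label  string
	Number string
}

type Person struct {
	Name   string
	Phones []Phone
}

func main() {
	// Keys follow the dotted-path convention; the slice index is required
	// because Phone is a struct.
	values := map[string][]string{
		"Name":            {"John"},
		"Phones.0.Label":  {"home"},
		"Phones.0.Number": {"999-999-999"},
		"Phones.1.Label":  {"work"},
		"Phones.1.Number": {"111-111-111"},
	}

	var p Person
	if err := schema.NewDecoder().Decode(&p, values); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Printf("%+v\n", p)
}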
213 vendor/github.com/gorilla/schema/encoder.go generated vendored Normal file
@@ -0,0 +1,213 @@
package schema
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type encoderFunc func(reflect.Value) string
|
||||
|
||||
// Encoder encodes values from a struct into url.Values.
|
||||
type Encoder struct {
|
||||
cache *cache
|
||||
regenc map[reflect.Type]encoderFunc
|
||||
}
|
||||
|
||||
// NewEncoder returns a new Encoder with defaults.
|
||||
func NewEncoder() *Encoder {
|
||||
return &Encoder{cache: newCache(), regenc: make(map[reflect.Type]encoderFunc)}
|
||||
}
|
||||
|
||||
// Encode encodes a struct into map[string][]string.
|
||||
//
|
||||
// Intended for use with url.Values.
|
||||
func (e *Encoder) Encode(src interface{}, dst map[string][]string) error {
|
||||
v := reflect.ValueOf(src)
|
||||
|
||||
return e.encode(v, dst)
|
||||
}
|
||||
|
||||
// RegisterEncoder registers a converter for encoding a custom type.
|
||||
func (e *Encoder) RegisterEncoder(value interface{}, encoder func(reflect.Value) string) {
|
||||
e.regenc[reflect.TypeOf(value)] = encoder
|
||||
}
|
||||
|
||||
// SetAliasTag changes the tag used to locate custom field aliases.
|
||||
// The default tag is "schema".
|
||||
func (e *Encoder) SetAliasTag(tag string) {
|
||||
e.cache.tag = tag
|
||||
}
|
||||
|
||||
// isValidStructPointer tests whether the input value is a valid struct pointer.
|
||||
func isValidStructPointer(v reflect.Value) bool {
|
||||
return v.Type().Kind() == reflect.Ptr && v.Elem().IsValid() && v.Elem().Type().Kind() == reflect.Struct
|
||||
}
|
||||
|
||||
func isZero(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Func:
|
||||
case reflect.Map, reflect.Slice:
|
||||
return v.IsNil() || v.Len() == 0
|
||||
case reflect.Array:
|
||||
z := true
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
z = z && isZero(v.Index(i))
|
||||
}
|
||||
return z
|
||||
case reflect.Struct:
|
||||
type zero interface {
|
||||
IsZero() bool
|
||||
}
|
||||
if v.Type().Implements(reflect.TypeOf((*zero)(nil)).Elem()) {
|
||||
iz := v.MethodByName("IsZero").Call([]reflect.Value{})[0]
|
||||
return iz.Interface().(bool)
|
||||
}
|
||||
z := true
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
z = z && isZero(v.Field(i))
|
||||
}
|
||||
return z
|
||||
}
|
||||
// Compare other types directly:
|
||||
z := reflect.Zero(v.Type())
|
||||
return v.Interface() == z.Interface()
|
||||
}
|
||||
|
||||
func (e *Encoder) encode(v reflect.Value, dst map[string][]string) error {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
v = v.Elem()
|
||||
}
|
||||
if v.Kind() != reflect.Struct {
|
||||
return errors.New("schema: interface must be a struct")
|
||||
}
|
||||
t := v.Type()
|
||||
|
||||
errors := MultiError{}
|
||||
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
name, opts := fieldAlias(t.Field(i), e.cache.tag)
|
||||
if name == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Encode struct pointer types if the field is a valid pointer and a struct.
|
||||
if isValidStructPointer(v.Field(i)) && !e.hasCustomEncoder(v.Field(i).Type()) {
|
||||
err := e.encode(v.Field(i).Elem(), dst)
|
||||
if err != nil {
|
||||
errors[v.Field(i).Elem().Type().String()] = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
encFunc := typeEncoder(v.Field(i).Type(), e.regenc)
|
||||
|
||||
// Encode non-slice types and custom implementations immediately.
|
||||
if encFunc != nil {
|
||||
value := encFunc(v.Field(i))
|
||||
if opts.Contains("omitempty") && isZero(v.Field(i)) {
|
||||
continue
|
||||
}
|
||||
|
||||
dst[name] = append(dst[name], value)
|
||||
continue
|
||||
}
|
||||
|
||||
if v.Field(i).Type().Kind() == reflect.Struct {
|
||||
err := e.encode(v.Field(i), dst)
|
||||
if err != nil {
|
||||
errors[v.Field(i).Type().String()] = err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if v.Field(i).Type().Kind() == reflect.Slice {
|
||||
encFunc = typeEncoder(v.Field(i).Type().Elem(), e.regenc)
|
||||
}
|
||||
|
||||
if encFunc == nil {
|
||||
errors[v.Field(i).Type().String()] = fmt.Errorf("schema: encoder not found for %v", v.Field(i))
|
||||
continue
|
||||
}
|
||||
|
||||
// Encode a slice.
|
||||
if v.Field(i).Len() == 0 && opts.Contains("omitempty") {
|
||||
continue
|
||||
}
|
||||
|
||||
dst[name] = []string{}
|
||||
for j := 0; j < v.Field(i).Len(); j++ {
|
||||
dst[name] = append(dst[name], encFunc(v.Field(i).Index(j)))
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) hasCustomEncoder(t reflect.Type) bool {
|
||||
_, exists := e.regenc[t]
|
||||
return exists
|
||||
}
|
||||
|
||||
func typeEncoder(t reflect.Type, reg map[reflect.Type]encoderFunc) encoderFunc {
|
||||
if f, ok := reg[t]; ok {
|
||||
return f
|
||||
}
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return encodeBool
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return encodeInt
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return encodeUint
|
||||
case reflect.Float32:
|
||||
return encodeFloat32
|
||||
case reflect.Float64:
|
||||
return encodeFloat64
|
||||
case reflect.Ptr:
|
||||
f := typeEncoder(t.Elem(), reg)
|
||||
return func(v reflect.Value) string {
|
||||
if v.IsNil() {
|
||||
return "null"
|
||||
}
|
||||
return f(v.Elem())
|
||||
}
|
||||
case reflect.String:
|
||||
return encodeString
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func encodeBool(v reflect.Value) string {
|
||||
return strconv.FormatBool(v.Bool())
|
||||
}
|
||||
|
||||
func encodeInt(v reflect.Value) string {
|
||||
return strconv.FormatInt(int64(v.Int()), 10)
|
||||
}
|
||||
|
||||
func encodeUint(v reflect.Value) string {
|
||||
return strconv.FormatUint(uint64(v.Uint()), 10)
|
||||
}
|
||||
|
||||
func encodeFloat(v reflect.Value, bits int) string {
|
||||
return strconv.FormatFloat(v.Float(), 'f', 6, bits)
|
||||
}
|
||||
|
||||
func encodeFloat32(v reflect.Value) string {
|
||||
return encodeFloat(v, 32)
|
||||
}
|
||||
|
||||
func encodeFloat64(v reflect.Value) string {
|
||||
return encodeFloat(v, 64)
|
||||
}
|
||||
|
||||
func encodeString(v reflect.Value) string {
|
||||
return v.String()
|
||||
}
|
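A short, hypothetical sketch of the encoder side shown above, assuming a struct with "schema" tags and an omitempty option (field names and tags are invented for illustration):

package main

import (
	"fmt"
	"net/url"

	"github.com/gorilla/schema"
)

type Search struct {
	Query string   `schema:"q"`
	Page  int      `schema:"page"`
	Tags  []string `schema:"tag"`
	Debug bool     `schema:"debug,omitempty"` // omitted when false, per the isZero check above
}

func main() {
	dst := url.Values{} // url.Values satisfies map[string][]string
	s := Search{Query: "bloom filters", Page: 2, Tags: []string{"go", "hashing"}}

	if err := schema.NewEncoder().Encode(s, dst); err != nil {
		fmt.Println("encode error:", err)
		return
	}
	fmt.Println(dst.Encode()) // e.g. page=2&q=bloom+filters&tag=go&tag=hashing
}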
2 vendor/github.com/greatroar/blobloom/.gitattributes generated vendored
@@ -1,2 +0,0 @@
# Work around https://github.com/golang/go/issues/52268.
|
||||
**/testdata/fuzz/*/* eol=lf
|
25 vendor/github.com/greatroar/blobloom/.golangci.yml generated vendored
@@ -1,25 +0,0 @@
# Configuration for golangci-lint.
|
||||
|
||||
linters:
|
||||
disable:
|
||||
- asciicheck
|
||||
enable:
|
||||
- gocognit
|
||||
- gocyclo
|
||||
- godot
|
||||
- gofumpt
|
||||
- lll
|
||||
- misspell
|
||||
- nakedret
|
||||
- thelper
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
- path: _test\.go
|
||||
linters:
|
||||
errcheck
|
||||
|
||||
linters-settings:
|
||||
govet:
|
||||
enable:
|
||||
- atomicalign
|
202 vendor/github.com/greatroar/blobloom/LICENSE generated vendored
@@ -1,202 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
86 vendor/github.com/greatroar/blobloom/README.md generated vendored
@@ -1,86 +0,0 @@
Blobloom
|
||||
========
|
||||
|
||||
A Bloom filter package for Go (golang) with no compile-time dependencies.
|
||||
|
||||
This package implements a version of Bloom filters called [blocked Bloom filters](
|
||||
https://algo2.iti.kit.edu/documents/cacheefficientbloomfilters-jea.pdf),
|
||||
which get a speed boost from using the CPU cache more efficiently
|
||||
than regular Bloom filters.
|
||||
|
||||
Unlike most Bloom filter packages for Go,
|
||||
this one doesn't run a hash function for you.
|
||||
That's a benefit if you need a custom hash
|
||||
or you want to pick the fastest one for an application.
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
To construct a Bloom filter, you need to know how many keys you want to store
|
||||
and what rate of false positives you find acceptable.
|
||||
|
||||
f := blobloom.NewOptimized(blobloom.Config{
|
||||
Capacity: nkeys, // Expected number of keys.
|
||||
FPRate: 1e-4, // Accept one false positive per 10,000 lookups.
|
||||
})
|
||||
|
||||
To add a key:
|
||||
|
||||
// import "github.com/cespare/xxhash/v2"
|
||||
f.Add(xxhash.Sum64(key))
|
||||
|
||||
To test for the presence of a key in the filter:
|
||||
|
||||
if f.Has(xxhash.Sum64(key)) {
|
||||
// Key is probably in f.
|
||||
} else {
|
||||
// Key is certainly not in f.
|
||||
}
|
||||
|
||||
The false positive rate is defined as usual:
|
||||
if you look up 10,000 random keys in a Bloom filter filled to capacity,
|
||||
an expected one of those is a false positive for FPRate 1e-4.
|
||||
|
||||
See the examples/ directory and the
|
||||
[package documentation](https://pkg.go.dev/github.com/greatroar/blobloom)
|
||||
for further usage information and examples.
|
||||
|
||||
Hash functions
|
||||
--------------
|
||||
|
||||
Blobloom does not provide hash functions. Instead, it requires client code to
|
||||
represent each key as a single 64-bit hash value, leaving it to the user to
|
||||
pick the right hash function for a particular problem. Here are some general
|
||||
suggestions:
|
||||
|
||||
* If you use Bloom filters to speed up access to a key-value store, you might
|
||||
want to look at [xxh3](https://github.com/zeebo/xxh3) or [xxhash](
|
||||
https://github.com/cespare/xxhash).
|
||||
* If your keys are cryptographic hashes, consider using the first 8 bytes of those hashes.
|
||||
* If you use Bloom filters to make probabilistic decisions, a randomized hash
|
||||
function such as [maphash](https://golang.org/pkg/hash/maphash) should prevent
|
||||
the same false positives occurring every time.
|
||||
|
||||
When evaluating a hash function, or designing a custom one,
|
||||
make sure it is a 64-bit hash that properly mixes its input bits.
|
||||
Casting a 32-bit hash to uint64 gives suboptimal results.
|
||||
So does passing integer keys in without running them through a mixing function.
|
||||
|
||||
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
Copyright © 2020-2023 the Blobloom authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
279 vendor/github.com/greatroar/blobloom/bloomfilter.go generated vendored
@@ -1,279 +0,0 @@
// Copyright 2020-2022 the Blobloom authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package blobloom implements blocked Bloom filters.
|
||||
//
|
||||
// Blocked Bloom filters are an approximate set data structure: if a key has
|
||||
// been added to a filter, a lookup of that key returns true, but if the key
|
||||
// has not been added, there is a non-zero probability that the lookup still
|
||||
// returns true (a false positive). False negatives are impossible: if the
|
||||
// lookup for a key returns false, that key has not been added.
|
||||
//
|
||||
// In this package, keys are represented exclusively as hashes. Client code
|
||||
// is responsible for supplying a 64-bit hash value.
|
||||
//
|
||||
// Compared to standard Bloom filters, blocked Bloom filters use the CPU
|
||||
// cache more efficiently. A blocked Bloom filter is an array of ordinary
|
||||
// Bloom filters of fixed size BlockBits (the blocks). The lower half of the
|
||||
// hash selects the block to use.
|
||||
//
|
||||
// To achieve the same false positive rate (FPR) as a standard Bloom filter,
|
||||
// a blocked Bloom filter requires more memory. For an FPR of at most 2e-6
|
||||
// (two in a million), it uses ~20% more memory. At 1e-10, the space required
|
||||
// is double that of a standard Bloom filter.
|
||||
//
|
||||
// For more details, see the 2010 paper by Putze, Sanders and Singler,
|
||||
// https://algo2.iti.kit.edu/documents/cacheefficientbloomfilters-jea.pdf.
|
||||
package blobloom
|
||||
|
||||
import "math"
|
||||
|
||||
// BlockBits is the number of bits per block and the minimum number of bits
|
||||
// in a Filter.
|
||||
//
|
||||
// The value of this constant is chosen to match the L1 cache line size
|
||||
// of popular architectures (386, amd64, arm64).
|
||||
const BlockBits = 512
|
||||
|
||||
// MaxBits is the maximum number of bits supported by a Filter.
|
||||
const MaxBits = BlockBits << 32 // 256GiB.
|
||||
|
||||
// A Filter is a blocked Bloom filter.
|
||||
type Filter struct {
|
||||
b []block // Shards.
|
||||
k int // Number of hash functions required.
|
||||
}
|
||||
|
||||
// New constructs a Bloom filter with given numbers of bits and hash functions.
|
||||
//
|
||||
// The number of bits should be at least BlockBits; smaller values are silently
|
||||
// increased.
|
||||
//
|
||||
// The number of hashes reflects the number of hashes synthesized from the
|
||||
// single hash passed in by the client. It is silently increased to two if
|
||||
// a lower value is given.
|
||||
func New(nbits uint64, nhashes int) *Filter {
|
||||
nbits, nhashes = fixBitsAndHashes(nbits, nhashes)
|
||||
|
||||
return &Filter{
|
||||
b: make([]block, nbits/BlockBits),
|
||||
k: nhashes,
|
||||
}
|
||||
}
|
||||
|
||||
func fixBitsAndHashes(nbits uint64, nhashes int) (uint64, int) {
|
||||
if nbits < 1 {
|
||||
nbits = BlockBits
|
||||
}
|
||||
if nhashes < 2 {
|
||||
nhashes = 2
|
||||
}
|
||||
if nbits > MaxBits {
|
||||
panic("nbits exceeds MaxBits")
|
||||
}
|
||||
|
||||
// Round nbits up to a multiple of BlockBits.
|
||||
if nbits%BlockBits != 0 {
|
||||
nbits += BlockBits - nbits%BlockBits
|
||||
}
|
||||
|
||||
return nbits, nhashes
|
||||
}
|
||||
|
||||
// Add inserts a key with hash value h into f.
|
||||
func (f *Filter) Add(h uint64) {
|
||||
h1, h2 := uint32(h>>32), uint32(h)
|
||||
b := getblock(f.b, h2)
|
||||
|
||||
for i := 1; i < f.k; i++ {
|
||||
h1, h2 = doublehash(h1, h2, i)
|
||||
b.setbit(h1)
|
||||
}
|
||||
}
|
||||
|
||||
// log(1 - 1/BlockBits) computed with 128 bits precision.
|
||||
// Note that this is extremely close to -1/BlockBits,
|
||||
// which is what Wikipedia would have us use:
|
||||
// https://en.wikipedia.org/wiki/Bloom_filter#Approximating_the_number_of_items_in_a_Bloom_filter.
|
||||
const log1minus1divBlockbits = -0.0019550348358033505576274922418668121377
|
||||
|
||||
// Cardinality estimates the number of distinct keys added to f.
|
||||
//
|
||||
// The estimate is most reliable when f is filled to roughly its capacity.
|
||||
// It gets worse as f gets more densely filled. When one of the blocks is
|
||||
// entirely filled, the estimate becomes +Inf.
|
||||
//
|
||||
// The return value is the maximum likelihood estimate of Papapetrou, Siberski
|
||||
// and Nejdl, summed over the blocks
|
||||
// (https://www.win.tue.nl/~opapapetrou/papers/Bloomfilters-DAPD.pdf).
|
||||
func (f *Filter) Cardinality() float64 {
|
||||
return cardinality(f.k, f.b, onescount)
|
||||
}
|
||||
|
||||
func cardinality(nhashes int, b []block, onescount func(*block) int) float64 {
|
||||
k := float64(nhashes - 1)
|
||||
|
||||
// The probability of some bit not being set in a single insertion is
|
||||
// p0 = (1-1/BlockBits)^k.
|
||||
//
|
||||
// logProb0Inv = 1 / log(p0) = 1 / (k*log(1-1/BlockBits)).
|
||||
logProb0Inv := 1 / (k * log1minus1divBlockbits)
|
||||
|
||||
var n float64
|
||||
for i := range b {
|
||||
ones := onescount(&b[i])
|
||||
if ones == 0 {
|
||||
continue
|
||||
}
|
||||
n += math.Log1p(-float64(ones) / BlockBits)
|
||||
}
|
||||
return n * logProb0Inv
|
||||
}
|
||||
|
||||
// Clear resets f to its empty state.
|
||||
func (f *Filter) Clear() {
|
||||
for i := 0; i < len(f.b); i++ {
|
||||
f.b[i] = block{}
|
||||
}
|
||||
}
|
||||
|
||||
// Empty reports whether f contains no keys.
|
||||
func (f *Filter) Empty() bool {
|
||||
for i := 0; i < len(f.b); i++ {
|
||||
if f.b[i] != (block{}) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equals returns true if f and g contain the same keys (in terms of Has)
|
||||
// when used with the same hash function.
|
||||
func (f *Filter) Equals(g *Filter) bool {
|
||||
if g.k != f.k || len(g.b) != len(f.b) {
|
||||
return false
|
||||
}
|
||||
for i := range g.b {
|
||||
if f.b[i] != g.b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Fill sets f to a completely full filter.
|
||||
// After Fill, Has returns true for any key.
|
||||
func (f *Filter) Fill() {
|
||||
for i := 0; i < len(f.b); i++ {
|
||||
for j := 0; j < blockWords; j++ {
|
||||
f.b[i][j] = ^uint32(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Has reports whether a key with hash value h has been added.
|
||||
// It may return a false positive.
|
||||
func (f *Filter) Has(h uint64) bool {
|
||||
h1, h2 := uint32(h>>32), uint32(h)
|
||||
b := getblock(f.b, h2)
|
||||
|
||||
for i := 1; i < f.k; i++ {
|
||||
h1, h2 = doublehash(h1, h2, i)
|
||||
if !b.getbit(h1) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// doublehash generates the hash values to use in iteration i of
|
||||
// enhanced double hashing from the values h1, h2 of the previous iteration.
|
||||
// See https://www.ccs.neu.edu/home/pete/pub/bloom-filters-verification.pdf.
|
||||
func doublehash(h1, h2 uint32, i int) (uint32, uint32) {
|
||||
h1 = h1 + h2
|
||||
h2 = h2 + uint32(i)
|
||||
return h1, h2
|
||||
}
|
||||
|
||||
// NumBits returns the number of bits of f.
|
||||
func (f *Filter) NumBits() uint64 {
|
||||
return BlockBits * uint64(len(f.b))
|
||||
}
|
||||
|
||||
func checkBinop(f, g *Filter) {
|
||||
if len(f.b) != len(g.b) {
|
||||
panic("Bloom filters do not have the same number of bits")
|
||||
}
|
||||
if f.k != g.k {
|
||||
panic("Bloom filters do not have the same number of hash functions")
|
||||
}
|
||||
}
|
||||
|
||||
// Intersect sets f to the intersection of f and g.
|
||||
//
|
||||
// Intersect panics when f and g do not have the same number of bits and
|
||||
// hash functions. Both Filters must be using the same hash function(s),
|
||||
// but Intersect cannot check this.
|
||||
//
|
||||
// Since Bloom filters may return false positives, Has may return true for
|
||||
// a key that was not in both f and g.
|
||||
//
|
||||
// After Intersect, the estimates from f.Cardinality and f.FPRate should be
|
||||
// considered unreliable.
|
||||
func (f *Filter) Intersect(g *Filter) {
|
||||
checkBinop(f, g)
|
||||
f.intersect(g)
|
||||
}
|
||||
|
||||
// Union sets f to the union of f and g.
|
||||
//
|
||||
// Union panics when f and g do not have the same number of bits and
|
||||
// hash functions. Both Filters must be using the same hash function(s),
|
||||
// but Union cannot check this.
|
||||
func (f *Filter) Union(g *Filter) {
|
||||
checkBinop(f, g)
|
||||
f.union(g)
|
||||
}
|
||||
|
||||
const (
|
||||
wordSize = 32
|
||||
blockWords = BlockBits / wordSize
|
||||
)
|
||||
|
||||
// A block is a fixed-size Bloom filter, used as a shard of a Filter.
|
||||
type block [blockWords]uint32
|
||||
|
||||
func getblock(b []block, h2 uint32) *block {
|
||||
i := reducerange(h2, uint32(len(b)))
|
||||
return &b[i]
|
||||
}
|
||||
|
||||
// reducerange maps i to an integer in the range [0,n).
|
||||
// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
|
||||
func reducerange(i, n uint32) uint32 {
|
||||
return uint32((uint64(i) * uint64(n)) >> 32)
|
||||
}
|
||||
|
||||
// getbit reports whether bit (i modulo BlockBits) is set.
|
||||
func (b *block) getbit(i uint32) bool {
|
||||
bit := uint32(1) << (i % wordSize)
|
||||
x := (*b)[(i/wordSize)%blockWords] & bit
|
||||
return x != 0
|
||||
}
|
||||
|
||||
// setbit sets bit (i modulo BlockBits) of b.
|
||||
func (b *block) setbit(i uint32) {
|
||||
bit := uint32(1) << (i % wordSize)
|
||||
(*b)[(i/wordSize)%blockWords] |= bit
|
||||
}
|
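A minimal usage sketch of the filter API above; blobloom leaves hashing to the caller, so this example assumes FNV-1a from the standard library (sizes and keys are illustrative):

package main

import (
	"fmt"
	"hash/fnv"

	"github.com/greatroar/blobloom"
)

// hash64 maps a key to the single 64-bit hash value the filter expects.
func hash64(key string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(key))
	return h.Sum64()
}

func main() {
	// 1 MiB filter (8 Mibit) with 6 synthesized hash functions.
	f := blobloom.New(8<<20, 6)

	f.Add(hash64("alice"))
	f.Add(hash64("bob"))

	fmt.Println(f.Has(hash64("alice"))) // true
	fmt.Println(f.Has(hash64("carol"))) // false, barring a false positive
	fmt.Printf("~%.0f distinct keys added\n", f.Cardinality())
}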
246 vendor/github.com/greatroar/blobloom/io.go generated vendored
@@ -1,246 +0,0 @@
// Copyright 2023 the Blobloom authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package blobloom
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
const maxCommentLen = 44
|
||||
|
||||
// Dump writes f to w, with an optional comment string, in the binary format
|
||||
// that a Loader accepts. It returns the number of bytes written to w.
|
||||
//
|
||||
// The comment may contain arbitrary data, within the limits laid out by the
|
||||
// format description. It can be used to record the hash function to be used
|
||||
// with a Filter.
|
||||
func Dump(w io.Writer, f *Filter, comment string) (int64, error) {
|
||||
return dump(w, f.b, f.k, comment)
|
||||
}
|
||||
|
||||
// DumpSync is like Dump, but for SyncFilters.
|
||||
//
|
||||
// If other goroutines are simultaneously modifying f,
|
||||
// their modifications may not be reflected in the dump.
|
||||
// Separate synchronization is required to prevent this.
|
||||
//
|
||||
// The format produced is the same as Dump's. The fact that
|
||||
// the argument is a SyncFilter is not encoded in the dump.
|
||||
func DumpSync(w io.Writer, f *SyncFilter, comment string) (n int64, err error) {
|
||||
return dump(w, f.b, f.k, comment)
|
||||
}
|
||||
|
||||
func dump(w io.Writer, b []block, nhashes int, comment string) (n int64, err error) {
|
||||
switch {
|
||||
case len(b) == 0 || nhashes == 0:
|
||||
err = errors.New("blobloom: won't dump uninitialized Filter")
|
||||
case len(comment) > maxCommentLen:
|
||||
err = fmt.Errorf("blobloom: comment of length %d too long", len(comment))
|
||||
case strings.IndexByte(comment, 0) != -1:
|
||||
err = fmt.Errorf("blobloom: comment %q contains zero byte", len(comment))
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var buf [64]byte
|
||||
copy(buf[:8], "blobloom")
|
||||
// As documented in the comment for Loader, we store one less than the
|
||||
// number of blocks. This way, we can use the otherwise invalid value 0
|
||||
// and store 2³² blocks instead of at most 2³²-1.
|
||||
binary.LittleEndian.PutUint32(buf[12:], uint32(len(b)-1))
|
||||
binary.LittleEndian.PutUint32(buf[16:], uint32(nhashes))
|
||||
copy(buf[20:], comment)
|
||||
|
||||
k, err := w.Write(buf[:])
|
||||
n = int64(k)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
for i := range b {
|
||||
for j := range b[i] {
|
||||
x := atomic.LoadUint32(&b[i][j])
|
||||
binary.LittleEndian.PutUint32(buf[4*j:], x)
|
||||
}
|
||||
k, err = w.Write(buf[:])
|
||||
n += int64(k)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// A Loader reads a Filter or SyncFilter from an io.Reader.
|
||||
//
|
||||
// A Loader accepts the binary format produced by Dump. The format starts
|
||||
// with a 64-byte header:
|
||||
// - the string "blobloom", in ASCII;
|
||||
// - a four-byte version number, which must be zero;
|
||||
// - the number of Bloom filter blocks, minus one, as a 32-bit integer;
|
||||
// - the number of hashes, as a 32-bit integer;
|
||||
// - a comment of at most 44 non-zero bytes, padded to 44 bytes with zeros.
|
||||
//
|
||||
// After the header come the 512-bit blocks, divided into sixteen 32-bit limbs.
|
||||
// All integers are little-endian.
|
||||
type Loader struct {
|
||||
buf [64]byte
|
||||
r io.Reader
|
||||
err error
|
||||
|
||||
Comment string // Comment field. Filled in by NewLoader.
|
||||
nblocks uint64
|
||||
nhashes int
|
||||
}
|
||||
|
||||
// NewLoader parses the format header from r and returns a Loader
|
||||
// that can be used to load a Filter from it.
|
||||
func NewLoader(r io.Reader) (*Loader, error) {
|
||||
l := &Loader{r: r}
|
||||
|
||||
err := l.fillbuf()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
version := binary.LittleEndian.Uint32(l.buf[8:])
|
||||
// See comment in dump for the +1.
|
||||
l.nblocks = 1 + uint64(binary.LittleEndian.Uint32(l.buf[12:]))
|
||||
l.nhashes = int(binary.LittleEndian.Uint32(l.buf[16:]))
|
||||
comment := l.buf[20:]
|
||||
|
||||
switch {
|
||||
case string(l.buf[:8]) != "blobloom":
|
||||
err = errors.New("blobloom: not a Bloom filter dump")
|
||||
case version != 0:
|
||||
err = errors.New("blobloom: unsupported dump version")
|
||||
case l.nhashes == 0:
|
||||
err = errors.New("blobloom: zero hashes in Bloom filter dump")
|
||||
}
|
||||
if err == nil {
|
||||
comment, err = checkComment(comment)
|
||||
l.Comment = string(comment)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
l = nil
|
||||
}
|
||||
return l, err
|
||||
}
|
||||
|
||||
// Load sets f to the union of f and the Loader's filter, then returns f.
|
||||
// If f is nil, a new Filter of the appropriate size is constructed.
|
||||
//
|
||||
// If f is not nil and an error occurs while reading from the Loader,
|
||||
// f may end up in an inconsistent state.
|
||||
func (l *Loader) Load(f *Filter) (*Filter, error) {
|
||||
if f == nil {
|
||||
nbits := BlockBits * l.nblocks
|
||||
if nbits > MaxBits {
|
||||
return nil, fmt.Errorf("blobloom: %d blocks is too large", l.nblocks)
|
||||
}
|
||||
f = New(nbits, int(l.nhashes))
|
||||
} else if err := l.checkBitsAndHashes(len(f.b), f.k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := range f.b {
|
||||
if err := l.fillbuf(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for j := range f.b[i] {
|
||||
f.b[i][j] |= binary.LittleEndian.Uint32(l.buf[4*j:])
|
||||
}
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// LoadSync sets f to the union of f and the Loader's filter, then returns f.
|
||||
// If f is nil, a new SyncFilter of the appropriate size is constructed.
|
||||
// Else, LoadSync may run concurrently with other modifications to f.
|
||||
//
|
||||
// If f is not nil and an error occurs while reading from the Loader,
|
||||
// f may end up in an inconsistent state.
|
||||
func (l *Loader) LoadSync(f *SyncFilter) (*SyncFilter, error) {
|
||||
if f == nil {
|
||||
nbits := BlockBits * l.nblocks
|
||||
if nbits > MaxBits {
|
||||
return nil, fmt.Errorf("blobloom: %d blocks is too large", l.nblocks)
|
||||
}
|
||||
f = NewSync(nbits, int(l.nhashes))
|
||||
} else if err := l.checkBitsAndHashes(len(f.b), f.k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := range f.b {
|
||||
if err := l.fillbuf(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for j := range f.b[i] {
|
||||
p := &f.b[i][j]
|
||||
x := binary.LittleEndian.Uint32(l.buf[4*j:])
|
||||
|
||||
for {
|
||||
old := atomic.LoadUint32(p)
|
||||
if atomic.CompareAndSwapUint32(p, old, old|x) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (l *Loader) checkBitsAndHashes(nblocks, nhashes int) error {
|
||||
switch {
|
||||
case nblocks != int(l.nblocks):
|
||||
return fmt.Errorf("blobloom: Filter has %d blocks, but dump has %d", nblocks, l.nblocks)
|
||||
case nhashes != l.nhashes:
|
||||
return fmt.Errorf("blobloom: Filter has %d hashes, but dump has %d", nhashes, l.nhashes)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *Loader) fillbuf() error {
|
||||
_, err := io.ReadFull(l.r, l.buf[:])
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func checkComment(p []byte) ([]byte, error) {
|
||||
eos := bytes.IndexByte(p, 0)
|
||||
if eos != -1 {
|
||||
tail := p[eos+1:]
|
||||
if !bytes.Equal(tail, make([]byte, len(tail))) {
|
||||
return nil, fmt.Errorf("blobloom: comment block %q contains zero byte", p)
|
||||
}
|
||||
p = p[:eos]
|
||||
}
|
||||
return p, nil
|
||||
}
|
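A hypothetical round-trip through Dump and Load, based on the API above (the comment string and hash value are illustrative):

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/greatroar/blobloom"
)

func main() {
	f := blobloom.New(1<<20, 5)
	f.Add(0xdeadbeefcafebabe)

	// Serialize the filter with a short comment, e.g. naming the hash function.
	var buf bytes.Buffer
	if _, err := blobloom.Dump(&buf, f, "fnv1a-64"); err != nil {
		log.Fatal(err)
	}

	// Reload it elsewhere; passing nil lets Load allocate a fresh filter.
	l, err := blobloom.NewLoader(&buf)
	if err != nil {
		log.Fatal(err)
	}
	g, err := l.Load(nil)
	if err != nil {
		log.Fatal(err)
	}

	fmt.Println(l.Comment, g.Has(0xdeadbeefcafebabe)) // fnv1a-64 true
}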
201 vendor/github.com/greatroar/blobloom/optimize.go generated vendored
@@ -1,201 +0,0 @@
// Copyright 2020 the Blobloom authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package blobloom
|
||||
|
||||
import "math"
|
||||
|
||||
// A Config holds parameters for Optimize or NewOptimized.
|
||||
type Config struct {
|
||||
// Trigger the "contains filtered or unexported fields" message for
|
||||
// forward compatibility and force the caller to use named fields.
|
||||
_ struct{}
|
||||
|
||||
// Capacity is the expected number of distinct keys to be added.
|
||||
// More keys can always be added, but the false positive rate can be
|
||||
// expected to rise above FPRate if their number exceeds the Capacity.
|
||||
Capacity uint64
|
||||
|
||||
// Desired lower bound on the false positive rate when the Bloom filter
|
||||
// has been filled to its capacity. FPRate must be between zero
|
||||
// (exclusive) and one (inclusive).
|
||||
FPRate float64
|
||||
|
||||
// Maximum size of the Bloom filter in bits. Zero means the global
|
||||
// MaxBits constant. A value less than BlockBits means BlockBits.
|
||||
MaxBits uint64
|
||||
}
|
||||
|
||||
// NewOptimized is shorthand for New(Optimize(config)).
|
||||
func NewOptimized(config Config) *Filter {
|
||||
return New(Optimize(config))
|
||||
}
|
||||
|
||||
// NewSyncOptimized is shorthand for New(Optimize(config)).
|
||||
func NewSyncOptimized(config Config) *SyncFilter {
|
||||
return NewSync(Optimize(config))
|
||||
}
|
||||
|
||||
// Optimize returns numbers of keys and hash functions that achieve the
|
||||
// desired false positive described by config.
|
||||
//
|
||||
// Optimize panics when config.FPRate is invalid.
|
||||
//
|
||||
// The estimated number of bits is imprecise for false positive rates below
|
||||
// ca. 1e-15.
|
||||
func Optimize(config Config) (nbits uint64, nhashes int) {
|
||||
n := float64(config.Capacity)
|
||||
p := config.FPRate
|
||||
|
||||
if p <= 0 || p > 1 {
|
||||
panic("false positive rate for a Bloom filter must be > 0, <= 1")
|
    }

    if n == 0 {
        // Assume the client wants to add at least one key; log2(0) = -inf.
        n = 1
    }

    // The optimal nbits/n is c = -log2(p) / ln(2) for a vanilla Bloom filter.
    c := math.Ceil(-math.Log2(p) / math.Ln2)
    if c < float64(len(correctC)) {
        c = float64(correctC[int(c)])
    } else {
        // We can't achieve the desired FPR. Just triple the number of bits.
        c *= 3
    }
    nbits = uint64(c * n)

    // Round up to a multiple of BlockBits.
    if nbits%BlockBits != 0 {
        nbits += BlockBits - nbits%BlockBits
    }

    var maxbits uint64 = MaxBits
    if config.MaxBits != 0 && config.MaxBits < maxbits {
        maxbits = config.MaxBits
        if maxbits < BlockBits {
            maxbits = BlockBits
        }
    }
    if nbits > maxbits {
        nbits = maxbits
        // Round down to a multiple of BlockBits.
        nbits -= nbits % BlockBits
    }

    // The corresponding optimal number of hash functions is k = c * log(2).
    // Try rounding up and down to see which rounding is better.
    c = float64(nbits) / n
    k := c * math.Ln2
    if k < 1 {
        nhashes = 1
        return nbits, nhashes
    }

    ceilK, floorK := math.Floor(k), math.Ceil(k)
    if ceilK == floorK {
        return nbits, int(ceilK)
    }

    fprCeil, _ := fpRate(c, math.Ceil(k))
    fprFloor, _ := fpRate(c, math.Floor(k))
    if fprFloor < fprCeil {
        k = floorK
    } else {
        k = ceilK
    }

    return nbits, int(k)
}

// correctC maps c = m/n for a vanilla Bloom filter to the c' for a
// blocked Bloom filter.
//
// This is Putze et al.'s Table I, extended down to zero.
// For c > 34, the values become huge and are hard to compute.
var correctC = []byte{
    1, 1, 2, 4, 5,
    6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 20, 21, 23,
    25, 26, 28, 30, 32, 35, 38, 40, 44, 48, 51, 58, 64, 74, 90,
}

// FPRate computes an estimate of the false positive rate of a Bloom filter
// after nkeys distinct keys have been added.
func FPRate(nkeys, nbits uint64, nhashes int) float64 {
    if nkeys == 0 {
        return 0
    }
    p, _ := fpRate(float64(nbits)/float64(nkeys), float64(nhashes))
    return p
}

func fpRate(c, k float64) (p float64, iter int) {
    switch {
    case c == 0:
        panic("0 bits per key is too few")
    case k == 0:
        panic("0 hashes is too few")
    }

    // Putze et al.'s Equation (3).
    //
    // The Poisson distribution has a single spike around its mean
    // BlockBits/c that gets slimmer and further away from zero as c tends
    // to zero (the Bloom filter gets more filled). We start at the mean,
    // then add terms left and right of it until their relative contribution
    // drops below ε.
    const ε = 1e-9
    mean := BlockBits / c

    // Ceil to make sure we start at one, not zero.
    i := math.Ceil(mean)
    p = math.Exp(logPoisson(mean, i) + logFprBlock(BlockBits/i, k))

    for j := i - 1; j > 0; j-- {
        add := math.Exp(logPoisson(mean, j) + logFprBlock(BlockBits/j, k))
        p += add
        iter++
        if add/p < ε {
            break
        }
    }

    for j := i + 1; ; j++ {
        add := math.Exp(logPoisson(mean, j) + logFprBlock(BlockBits/j, k))
        p += add
        iter++
        if add/p < ε {
            break
        }
    }

    return p, iter
}

// FPRate computes an estimate of f's false positive rate after nkeys distinct
// keys have been added.
func (f *Filter) FPRate(nkeys uint64) float64 {
    return FPRate(nkeys, f.NumBits(), f.k)
}

// Log of the FPR of a single block, FPR = (1 - exp(-k/c))^k.
func logFprBlock(c, k float64) float64 {
    return k * math.Log1p(-math.Exp(-k/c))
}

// Log of the Poisson distribution's pmf.
func logPoisson(λ, k float64) float64 {
    lg, _ := math.Lgamma(k + 1)
    return k*math.Log(λ) - λ - lg
}
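Aside, not part of the diff: the deleted lines above are blobloom's internal sizing and false-positive estimation. A minimal usage sketch, relying only on exported names that appear elsewhere in this commit (NewOptimized, Config, Add, Has, FPRate, xxhash.Sum64); the capacity and rate values are illustrative only:

package main

import (
    "fmt"

    "github.com/cespare/xxhash"
    "github.com/greatroar/blobloom"
)

func main() {
    // Size a filter for ~10,000 keys at a 1% target false-positive rate.
    // Internally this runs the c = -log2(p) / ln(2) estimate shown above.
    f := blobloom.NewOptimized(blobloom.Config{
        Capacity: 10000,
        FPRate:   0.01,
    })

    h := xxhash.Sum64([]byte("some event id"))
    f.Add(h) // keys are added as pre-hashed uint64 values

    fmt.Println(f.Has(h))        // true: Bloom filters have no false negatives
    fmt.Println(f.FPRate(10000)) // estimated FPR after 10,000 distinct keys
}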
148 vendor/github.com/greatroar/blobloom/setop_64bit.go generated vendored
@@ -1,148 +0,0 @@
// Copyright 2020-2022 the Blobloom authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build (amd64 || arm64) && !nounsafe
// +build amd64 arm64
// +build !nounsafe

package blobloom

import (
    "math/bits"
    "sync/atomic"
    "unsafe"
)

// Block reinterpreted as array of uint64.
type block64 [BlockBits / 64]uint64

func (f *Filter) intersect(g *Filter) {
    a, b := f.b, g.b
    for len(a) >= 2 && len(b) >= 2 {
        p := (*block64)(unsafe.Pointer(&a[0]))
        q := (*block64)(unsafe.Pointer(&b[0]))

        p[0] &= q[0]
        p[1] &= q[1]
        p[2] &= q[2]
        p[3] &= q[3]
        p[4] &= q[4]
        p[5] &= q[5]
        p[6] &= q[6]
        p[7] &= q[7]

        p = (*block64)(unsafe.Pointer(&a[1]))
        q = (*block64)(unsafe.Pointer(&b[1]))

        p[0] &= q[0]
        p[1] &= q[1]
        p[2] &= q[2]
        p[3] &= q[3]
        p[4] &= q[4]
        p[5] &= q[5]
        p[6] &= q[6]
        p[7] &= q[7]

        a, b = a[2:], b[2:]
    }

    if len(a) > 0 && len(b) > 0 {
        p := (*block64)(unsafe.Pointer(&a[0]))
        q := (*block64)(unsafe.Pointer(&b[0]))

        p[0] &= q[0]
        p[1] &= q[1]
        p[2] &= q[2]
        p[3] &= q[3]
        p[4] &= q[4]
        p[5] &= q[5]
        p[6] &= q[6]
        p[7] &= q[7]
    }
}

func (f *Filter) union(g *Filter) {
    a, b := f.b, g.b
    for len(a) >= 2 && len(b) >= 2 {
        p := (*block64)(unsafe.Pointer(&a[0]))
        q := (*block64)(unsafe.Pointer(&b[0]))

        p[0] |= q[0]
        p[1] |= q[1]
        p[2] |= q[2]
        p[3] |= q[3]
        p[4] |= q[4]
        p[5] |= q[5]
        p[6] |= q[6]
        p[7] |= q[7]

        p = (*block64)(unsafe.Pointer(&a[1]))
        q = (*block64)(unsafe.Pointer(&b[1]))

        p[0] |= q[0]
        p[1] |= q[1]
        p[2] |= q[2]
        p[3] |= q[3]
        p[4] |= q[4]
        p[5] |= q[5]
        p[6] |= q[6]
        p[7] |= q[7]

        a, b = a[2:], b[2:]
    }

    if len(a) > 0 && len(b) > 0 {
        p := (*block64)(unsafe.Pointer(&a[0]))
        q := (*block64)(unsafe.Pointer(&b[0]))

        p[0] |= q[0]
        p[1] |= q[1]
        p[2] |= q[2]
        p[3] |= q[3]
        p[4] |= q[4]
        p[5] |= q[5]
        p[6] |= q[6]
        p[7] |= q[7]
    }
}

func onescount(b *block) (n int) {
    p := (*block64)(unsafe.Pointer(&b[0]))

    n += bits.OnesCount64(p[0])
    n += bits.OnesCount64(p[1])
    n += bits.OnesCount64(p[2])
    n += bits.OnesCount64(p[3])
    n += bits.OnesCount64(p[4])
    n += bits.OnesCount64(p[5])
    n += bits.OnesCount64(p[6])
    n += bits.OnesCount64(p[7])

    return n
}

func onescountAtomic(b *block) (n int) {
    p := (*block64)(unsafe.Pointer(&b[0]))

    n += bits.OnesCount64(atomic.LoadUint64(&p[0]))
    n += bits.OnesCount64(atomic.LoadUint64(&p[1]))
    n += bits.OnesCount64(atomic.LoadUint64(&p[2]))
    n += bits.OnesCount64(atomic.LoadUint64(&p[3]))
    n += bits.OnesCount64(atomic.LoadUint64(&p[4]))
    n += bits.OnesCount64(atomic.LoadUint64(&p[5]))
    n += bits.OnesCount64(atomic.LoadUint64(&p[6]))
    n += bits.OnesCount64(atomic.LoadUint64(&p[7]))

    return n
}
115 vendor/github.com/greatroar/blobloom/setop_other.go generated vendored
@@ -1,115 +0,0 @@
// Copyright 2020-2022 the Blobloom authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build (!amd64 && !arm64) || nounsafe
// +build !amd64,!arm64 nounsafe

package blobloom

import (
    "math/bits"
    "sync/atomic"
)

func (f *Filter) intersect(g *Filter) {
    for i := range f.b {
        f.b[i].intersect(&g.b[i])
    }
}

func (f *Filter) union(g *Filter) {
    for i := range f.b {
        f.b[i].union(&g.b[i])
    }
}

func (b *block) intersect(c *block) {
    b[0] &= c[0]
    b[1] &= c[1]
    b[2] &= c[2]
    b[3] &= c[3]
    b[4] &= c[4]
    b[5] &= c[5]
    b[6] &= c[6]
    b[7] &= c[7]
    b[8] &= c[8]
    b[9] &= c[9]
    b[10] &= c[10]
    b[11] &= c[11]
    b[12] &= c[12]
    b[13] &= c[13]
    b[14] &= c[14]
    b[15] &= c[15]
}

func (b *block) union(c *block) {
    b[0] |= c[0]
    b[1] |= c[1]
    b[2] |= c[2]
    b[3] |= c[3]
    b[4] |= c[4]
    b[5] |= c[5]
    b[6] |= c[6]
    b[7] |= c[7]
    b[8] |= c[8]
    b[9] |= c[9]
    b[10] |= c[10]
    b[11] |= c[11]
    b[12] |= c[12]
    b[13] |= c[13]
    b[14] |= c[14]
    b[15] |= c[15]
}

func onescount(b *block) (n int) {
    n += bits.OnesCount32(b[0])
    n += bits.OnesCount32(b[1])
    n += bits.OnesCount32(b[2])
    n += bits.OnesCount32(b[3])
    n += bits.OnesCount32(b[4])
    n += bits.OnesCount32(b[5])
    n += bits.OnesCount32(b[6])
    n += bits.OnesCount32(b[7])
    n += bits.OnesCount32(b[8])
    n += bits.OnesCount32(b[9])
    n += bits.OnesCount32(b[10])
    n += bits.OnesCount32(b[11])
    n += bits.OnesCount32(b[12])
    n += bits.OnesCount32(b[13])
    n += bits.OnesCount32(b[14])
    n += bits.OnesCount32(b[15])

    return n
}

func onescountAtomic(b *block) (n int) {
    n += bits.OnesCount32(atomic.LoadUint32(&b[0]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[1]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[2]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[3]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[4]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[5]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[6]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[7]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[8]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[9]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[10]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[11]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[12]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[13]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[14]))
    n += bits.OnesCount32(atomic.LoadUint32(&b[15]))

    return n
}
145 vendor/github.com/greatroar/blobloom/sync.go generated vendored
@@ -1,145 +0,0 @@
// Copyright 2021-2022 the Blobloom authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package blobloom

import "sync/atomic"

// A SyncFilter is a Bloom filter that can be accessed and updated
// by multiple goroutines concurrently.
//
// A SyncFilter mostly behaves as a regular filter protected by a lock,
//
//	type SyncFilter struct {
//		Filter
//		lock sync.Mutex
//	}
//
// with each method taking and releasing the lock,
// but is implemented much more efficiently.
// See the method descriptions for exceptions to the previous rule.
type SyncFilter struct {
    b []block // Shards.
    k int     // Number of hash functions required.
}

// NewSync constructs a Bloom filter with given numbers of bits and hash functions.
//
// The number of bits should be at least BlockBits; smaller values are silently
// increased.
//
// The number of hashes reflects the number of hashes synthesized from the
// single hash passed in by the client. It is silently increased to two if
// a lower value is given.
func NewSync(nbits uint64, nhashes int) *SyncFilter {
    nbits, nhashes = fixBitsAndHashes(nbits, nhashes)

    return &SyncFilter{
        b: make([]block, nbits/BlockBits),
        k: nhashes,
    }

}

// Add insert a key with hash value h into f.
func (f *SyncFilter) Add(h uint64) {
    h1, h2 := uint32(h>>32), uint32(h)
    b := getblock(f.b, h2)

    for i := 1; i < f.k; i++ {
        h1, h2 = doublehash(h1, h2, i)
        setbitAtomic(b, h1)
    }
}

// Cardinality estimates the number of distinct keys added to f.
//
// The estimate is most reliable when f is filled to roughly its capacity.
// It gets worse as f gets more densely filled. When one of the blocks is
// entirely filled, the estimate becomes +Inf.
//
// The return value is the maximum likelihood estimate of Papapetrou, Siberski
// and Nejdl, summed over the blocks
// (https://www.win.tue.nl/~opapapetrou/papers/Bloomfilters-DAPD.pdf).
//
// If other goroutines are concurrently adding keys,
// the estimate may lie in between what would have been returned
// before the concurrent updates started and what is returned
// after the updates complete.
func (f *SyncFilter) Cardinality() float64 {
    return cardinality(f.k, f.b, onescountAtomic)
}

// Empty reports whether f contains no keys.
//
// If other goroutines are concurrently adding keys,
// Empty may return a false positive.
func (f *SyncFilter) Empty() bool {
    for i := 0; i < len(f.b); i++ {
        for j := 0; j < blockWords; j++ {
            if atomic.LoadUint32(&f.b[i][j]) != 0 {
                return false
            }
        }
    }
    return true
}

// Fill sets f to a completely full filter.
// After Fill, Has returns true for any key.
func (f *SyncFilter) Fill() {
    for i := 0; i < len(f.b); i++ {
        for j := 0; j < blockWords; j++ {
            atomic.StoreUint32(&f.b[i][j], ^uint32(0))
        }
    }
}

// Has reports whether a key with hash value h has been added.
// It may return a false positive.
func (f *SyncFilter) Has(h uint64) bool {
    h1, h2 := uint32(h>>32), uint32(h)
    b := getblock(f.b, h2)

    for i := 1; i < f.k; i++ {
        h1, h2 = doublehash(h1, h2, i)
        if !getbitAtomic(b, h1) {
            return false
        }
    }
    return true
}

// getbitAtomic reports whether bit (i modulo BlockBits) is set.
func getbitAtomic(b *block, i uint32) bool {
    bit := uint32(1) << (i % wordSize)
    x := atomic.LoadUint32(&(*b)[(i/wordSize)%blockWords])
    return x&bit != 0
}

// setbit sets bit (i modulo BlockBits) of b, atomically.
func setbitAtomic(b *block, i uint32) {
    bit := uint32(1) << (i % wordSize)
    p := &(*b)[(i/wordSize)%blockWords]

    for {
        old := atomic.LoadUint32(p)
        if old&bit != 0 {
            // Checking here instead of checking the return value from
            // the CAS is between 50% and 80% faster on the benchmark.
            return
        }
        atomic.CompareAndSwapUint32(p, old, old|bit)
    }
}
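Aside, not part of the diff: sync.go above is the concurrency-safe variant of the filter. A minimal sketch of concurrent use, assuming only the NewSync, Add, Has and Cardinality signatures shown in the deleted file (the sizes and key names are illustrative):

package main

import (
    "fmt"
    "sync"

    "github.com/cespare/xxhash"
    "github.com/greatroar/blobloom"
)

func main() {
    f := blobloom.NewSync(1<<20, 7) // 1 Mibit of state, 7 synthesized hashes

    var wg sync.WaitGroup
    for w := 0; w < 4; w++ {
        wg.Add(1)
        go func(w int) {
            defer wg.Done()
            for i := 0; i < 1000; i++ {
                key := fmt.Sprintf("worker-%d-key-%d", w, i)
                f.Add(xxhash.Sum64([]byte(key))) // safe from multiple goroutines
            }
        }(w)
    }
    wg.Wait()

    fmt.Println(f.Has(xxhash.Sum64([]byte("worker-0-key-0")))) // true
    fmt.Println(f.Cardinality())                               // roughly 4000
}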
16 vendor/github.com/greatroar/blobloom/test.sh generated vendored
@@ -1,16 +0,0 @@
#!/bin/sh

set -e -x

golangci-lint run . examples/*

go test

if [ "$(go env GOARCH)" = amd64 ]; then
    go test -tags nounsafe
    GOARCH=386 go test
fi

for e in examples/*; do
    (cd $e && go build && rm $(basename $e))
done
2 vendor/github.com/nbd-wtf/go-nostr/kinds.go generated vendored
@@ -26,7 +26,7 @@ const (
     KindChess int = 64
     KindMergeRequests int = 818
     KindBid int = 1021
-    KIndBidConfirmation int = 1022
+    KindBidConfirmation int = 1022
     KindOpenTimestamps int = 1040
     KindGiftWrap int = 1059
     KindFileMetadata int = 1063
12 vendor/github.com/nbd-wtf/go-nostr/nip77/nip77.go generated vendored
@@ -5,8 +5,6 @@ import (
     "fmt"
     "sync"

-    "github.com/cespare/xxhash"
-    "github.com/greatroar/blobloom"
     "github.com/nbd-wtf/go-nostr"
     "github.com/nbd-wtf/go-nostr/nip77/negentropy"
     "github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector"
@@ -88,10 +86,7 @@ func NegentropySync(ctx context.Context, store nostr.RelayStore, url string, fil
     go func(dir direction) {
         defer wg.Done()

-        seen := blobloom.NewOptimized(blobloom.Config{
-            Capacity: 10000,
-            FPRate:   0.01,
-        })
+        seen := make(map[string]struct{})

         doSync := func(ids []string) {
             defer wg.Done()
@@ -112,12 +107,11 @@ func NegentropySync(ctx context.Context, store nostr.RelayStore, url string, fil

         ids := pool.grab()
         for item := range dir.items {
-            h := xxhash.Sum64([]byte(item))
-            if seen.Has(h) {
+            if _, ok := seen[item]; ok {
                 continue
             }
+            seen[item] = struct{}{}

-            seen.Add(h)
             ids = append(ids, item)
             if len(ids) == 50 {
                 wg.Add(1)
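Aside, not part of the diff: the hunks above swap the probabilistic blobloom dedup for an exact map[string]struct{}. The map can never report an unseen id as already seen (no false positives), at the cost of memory that grows with the number of distinct ids, whereas the Bloom filter was fixed-size but could occasionally skip an id it had never actually stored. The new pattern in isolation, as a tiny self-contained sketch:

package main

import "fmt"

func main() {
    seen := make(map[string]struct{})
    for _, id := range []string{"a", "b", "a", "c", "b"} {
        if _, ok := seen[id]; ok {
            continue // exact duplicate, skip
        }
        seen[id] = struct{}{}
        fmt.Println("new id:", id) // prints a, b, c
    }
}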
12 vendor/github.com/nbd-wtf/go-nostr/relay.go generated vendored
@@ -182,11 +182,13 @@ func (r *Relay) ConnectWithTLS(ctx context.Context, tlsConfig *tls.Config) error
         for {
             select {
             case <-ticker.C:
-                err := wsutil.WriteClientMessage(r.Connection.conn, ws.OpPing, nil)
-                if err != nil {
-                    InfoLogger.Printf("{%s} error writing ping: %v; closing websocket", r.URL, err)
-                    r.Close() // this should trigger a context cancelation
-                    return
+                if r.Connection != nil {
+                    err := wsutil.WriteClientMessage(r.Connection.conn, ws.OpPing, nil)
+                    if err != nil {
+                        InfoLogger.Printf("{%s} error writing ping: %v; closing websocket", r.URL, err)
+                        r.Close() // this should trigger a context cancelation
+                        return
+                    }
                 }
             case writeRequest := <-r.writeQueue:
                 // all write requests will go through this to prevent races
14 vendor/modules.txt vendored
@@ -15,9 +15,9 @@ github.com/btcsuite/btcd/btcec/v2/schnorr
 # github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0
 ## explicit; go 1.17
 github.com/btcsuite/btcd/chaincfg/chainhash
-# github.com/cespare/xxhash v1.1.0
+# github.com/davecgh/go-spew v1.1.1
 ## explicit
-github.com/cespare/xxhash
+github.com/davecgh/go-spew/spew
 # github.com/decred/dcrd/crypto/blake256 v1.1.0
 ## explicit; go 1.17
 github.com/decred/dcrd/crypto/blake256
@@ -33,7 +33,7 @@ github.com/fasthttp/websocket
 ## explicit; go 1.23.1
 github.com/fiatjaf/eventstore
 github.com/fiatjaf/eventstore/postgresql
-# github.com/fiatjaf/khatru v0.12.0
+# github.com/fiatjaf/khatru v0.12.1
 ## explicit; go 1.23.1
 github.com/fiatjaf/khatru
 github.com/fiatjaf/khatru/policies
@@ -51,9 +51,9 @@ github.com/gobwas/pool/pbytes
 github.com/gobwas/ws
 github.com/gobwas/ws/wsflate
 github.com/gobwas/ws/wsutil
-# github.com/greatroar/blobloom v0.8.0
-## explicit; go 1.14
-github.com/greatroar/blobloom
+# github.com/gorilla/schema v1.4.1
+## explicit; go 1.20
+github.com/gorilla/schema
 # github.com/jmoiron/sqlx v1.4.0
 ## explicit; go 1.10
 github.com/jmoiron/sqlx
@@ -90,7 +90,7 @@ github.com/mattn/go-colorable
 # github.com/mattn/go-isatty v0.0.20
 ## explicit; go 1.15
 github.com/mattn/go-isatty
-# github.com/nbd-wtf/go-nostr v0.42.2
+# github.com/nbd-wtf/go-nostr v0.42.3
 ## explicit; go 1.23.1
 github.com/nbd-wtf/go-nostr
 github.com/nbd-wtf/go-nostr/nip11