Add goreleaser
This commit is contained in:
parent
4f6b5f519b
commit
67b56010c4
64 changed files with 3752 additions and 97 deletions
2
.gitignore
vendored
2
.gitignore
vendored
|
@ -26,3 +26,5 @@ well-goknown
|
|||
|
||||
# dev environment vars
|
||||
dev.env
|
||||
|
||||
dist/
|
||||
|
|
46
.goreleaser.yaml
Normal file
46
.goreleaser.yaml
Normal file
|
@ -0,0 +1,46 @@
|
|||
# This is an example .goreleaser.yml file with some sensible defaults.
|
||||
# Make sure to check the documentation at https://goreleaser.com
|
||||
|
||||
# The lines below are called `modelines`. See `:help modeline`
|
||||
# Feel free to remove those if you don't want/need to use them.
|
||||
# yaml-language-server: $schema=https://goreleaser.com/static/schema.json
|
||||
# vim: set ts=2 sw=2 tw=0 fo=cnqoj
|
||||
|
||||
version: 2
|
||||
|
||||
before:
|
||||
hooks:
|
||||
# You may remove this if you don't use go modules.
|
||||
- go mod tidy
|
||||
# you may remove this if you don't need go generate
|
||||
- go generate ./...
|
||||
|
||||
builds:
|
||||
- env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
- windows
|
||||
- darwin
|
||||
|
||||
archives:
|
||||
- format: tar.gz
|
||||
# this name template makes the OS and Arch compatible with the results of `uname`.
|
||||
name_template: >-
|
||||
{{ .ProjectName }}_
|
||||
{{- title .Os }}_
|
||||
{{- if eq .Arch "amd64" }}x86_64
|
||||
{{- else if eq .Arch "386" }}i386
|
||||
{{- else }}{{ .Arch }}{{ end }}
|
||||
{{- if .Arm }}v{{ .Arm }}{{ end }}
|
||||
# use zip for windows archives
|
||||
format_overrides:
|
||||
- goos: windows
|
||||
format: zip
|
||||
|
||||
changelog:
|
||||
sort: asc
|
||||
filters:
|
||||
exclude:
|
||||
- "^docs:"
|
||||
- "^test:"
|
11
go.mod
11
go.mod
|
@ -4,23 +4,26 @@ go 1.23.1
|
|||
|
||||
require (
|
||||
git.devvul.com/asara/gologger v0.9.0
|
||||
github.com/fiatjaf/eventstore v0.11.3
|
||||
github.com/fiatjaf/khatru v0.8.3
|
||||
github.com/fiatjaf/eventstore v0.12.0
|
||||
github.com/fiatjaf/khatru v0.9.1
|
||||
github.com/jmoiron/sqlx v1.4.0
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/nbd-wtf/go-nostr v0.38.2
|
||||
github.com/nbd-wtf/go-nostr v0.40.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/andybalholm/brotli v1.1.1 // indirect
|
||||
github.com/bep/debounce v1.2.1 // indirect
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
|
||||
github.com/cespare/xxhash v1.1.0 // indirect
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect
|
||||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
|
||||
github.com/fasthttp/websocket v1.5.10 // indirect
|
||||
github.com/gobwas/httphead v0.1.0 // indirect
|
||||
github.com/gobwas/pool v0.2.1 // indirect
|
||||
github.com/gobwas/ws v1.4.0 // indirect
|
||||
github.com/greatroar/blobloom v0.8.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
|
@ -34,7 +37,7 @@ require (
|
|||
github.com/tidwall/match v1.1.1 // indirect
|
||||
github.com/tidwall/pretty v1.2.1 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/valyala/fasthttp v1.56.0 // indirect
|
||||
github.com/valyala/fasthttp v1.57.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
|
||||
golang.org/x/net v0.30.0 // indirect
|
||||
golang.org/x/sys v0.26.0 // indirect
|
||||
|
|
33
go.sum
33
go.sum
|
@ -2,13 +2,20 @@ filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
|
|||
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
|
||||
git.devvul.com/asara/gologger v0.9.0 h1:gijJpkPjvzI5S/dmAXgYoKJbp5uuaETAOBYWo7bJg6U=
|
||||
git.devvul.com/asara/gologger v0.9.0/go.mod h1:APr1DdVYByFfPUGHqHtRMhxphQbj92/vT/t0iM40H/0=
|
||||
github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
|
||||
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
|
||||
github.com/bep/debounce v1.2.1 h1:v67fRdBA9UQu2NhLFXrSg0Brw7CexQekrBwDMM8bzeY=
|
||||
github.com/bep/debounce v1.2.1/go.mod h1:H8yggRPQKLUhUoqrJC1bO2xNya7vanpDl7xR3ISbCJ0=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurTXGPFfiQ=
|
||||
github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
|
||||
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decred/dcrd/crypto/blake256 v1.1.0 h1:zPMNGQCm0g4QTY27fOCorQW7EryeQ/U0x++OzVrdms8=
|
||||
|
@ -17,10 +24,10 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnN
|
|||
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
|
||||
github.com/fasthttp/websocket v1.5.10 h1:bc7NIGyrg1L6sd5pRzCIbXpro54SZLEluZCu0rOpcN4=
|
||||
github.com/fasthttp/websocket v1.5.10/go.mod h1:BwHeuXGWzCW1/BIKUKD3+qfCl+cTdsHu/f243NcAI/Q=
|
||||
github.com/fiatjaf/eventstore v0.11.3 h1:Lqm/8fx0R+Q9jVAxx4y+TNA9UliXVYIhOSRuJgW8T7o=
|
||||
github.com/fiatjaf/eventstore v0.11.3/go.mod h1:oCHPB4TprrNjbhH2kjMKt1O48O1pk3VxAy5iZkB5Fb0=
|
||||
github.com/fiatjaf/khatru v0.8.3 h1:bCXyfoPYesmJ00jmi7IyoLpE/CB/tPUw4nP62/3jbBw=
|
||||
github.com/fiatjaf/khatru v0.8.3/go.mod h1:44X/Mcc+2ObOqz+/fDbhAW3BeUEPKxDgrX9St/cXEKc=
|
||||
github.com/fiatjaf/eventstore v0.12.0 h1:ZdL+dZkIgBgIp5A3+3XLdPg/uucv5Tiws6DHzNfZG4M=
|
||||
github.com/fiatjaf/eventstore v0.12.0/go.mod h1:PxeYbZ3MsH0XLobANsp6c0cJjJYkfmBJ3TwrplFy/08=
|
||||
github.com/fiatjaf/khatru v0.9.1 h1:QjgEKKOxF+IZ5avztOSjgrWZ0+GPc/HVpdOlZAX5g74=
|
||||
github.com/fiatjaf/khatru v0.9.1/go.mod h1:WpsAMTmmN8d+ijdFu2RBHJkOVr4DPHMxqR1I/GutZCI=
|
||||
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
|
||||
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
|
||||
github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
|
||||
|
@ -30,6 +37,8 @@ github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6Wezm
|
|||
github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs=
|
||||
github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/greatroar/blobloom v0.8.0 h1:I9RlEkfqK9/6f1v9mFmDYegDQ/x0mISCpiNpAm23Pt4=
|
||||
github.com/greatroar/blobloom v0.8.0/go.mod h1:mjMJ1hh1wjGVfr93QIHJ6FfDNVrA0IELv8OvMHJxHKs=
|
||||
github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o=
|
||||
github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
|
@ -48,8 +57,8 @@ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWE
|
|||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU=
|
||||
github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y=
|
||||
github.com/nbd-wtf/go-nostr v0.38.2 h1:8PP+U8dx81jVEL89k/xMAejAlDeSDJ9ywNiyOj82so8=
|
||||
github.com/nbd-wtf/go-nostr v0.38.2/go.mod h1:TGKGj00BmJRXvRe0LlpDN3KKbELhhPXgBwUEhzu3Oq0=
|
||||
github.com/nbd-wtf/go-nostr v0.40.1 h1:+ogxn+CeRwjQSMSU161fOxKWtVWTEz/p++X4O8VKhMw=
|
||||
github.com/nbd-wtf/go-nostr v0.40.1/go.mod h1:FBa4FBJO7NuANvkeKSlrf0BIyxGufmrUbuelr6Q4Ick=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
|
@ -62,6 +71,12 @@ github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8=
|
|||
github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss=
|
||||
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 h1:D0vL7YNisV2yqE55+q0lFuGse6U8lxlg7fYTctlT5Gc=
|
||||
github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38/go.mod h1:sM7Mt7uEoCeFSCBM+qBrqvEo+/9vdmj19wzp3yzUhmg=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
|
||||
|
@ -73,8 +88,8 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
|
|||
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.56.0 h1:bEZdJev/6LCBlpdORfrLu/WOZXXxvrUQSiyniuaoW8U=
|
||||
github.com/valyala/fasthttp v1.56.0/go.mod h1:sReBt3XZVnudxuLOx4J/fMrJVorWRiWY2koQKgABiVI=
|
||||
github.com/valyala/fasthttp v1.57.0 h1:Xw8SjWGEP/+wAAgyy5XTvgrWlOD1+TxbbvNADYCm1Tg=
|
||||
github.com/valyala/fasthttp v1.57.0/go.mod h1:h6ZBaPRlzpZ6O3H5t2gEk1Qi33+TmLvfwgLLp0t9CpE=
|
||||
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
||||
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
||||
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
|
||||
|
@ -86,5 +101,7 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
|||
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
|
||||
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
|
27
vendor/github.com/bep/debounce/.gitignore
generated
vendored
Normal file
27
vendor/github.com/bep/debounce/.gitignore
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
cover.out
|
||||
nohup.out
|
21
vendor/github.com/bep/debounce/LICENSE
generated
vendored
Normal file
21
vendor/github.com/bep/debounce/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Bjørn Erik Pedersen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
35
vendor/github.com/bep/debounce/README.md
generated
vendored
Normal file
35
vendor/github.com/bep/debounce/README.md
generated
vendored
Normal file
|
@ -0,0 +1,35 @@
|
|||
# Go Debounce
|
||||
|
||||
[![Tests on Linux, MacOS and Windows](https://github.com/bep/debounce/workflows/Test/badge.svg)](https://github.com/bep/debounce/actions?query=workflow:Test)
|
||||
[![GoDoc](https://godoc.org/github.com/bep/debounce?status.svg)](https://godoc.org/github.com/bep/debounce)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/bep/debounce)](https://goreportcard.com/report/github.com/bep/debounce)
|
||||
[![codecov](https://codecov.io/gh/bep/debounce/branch/master/graph/badge.svg)](https://codecov.io/gh/bep/debounce)
|
||||
[![Release](https://img.shields.io/github/release/bep/debounce.svg?style=flat-square)](https://github.com/bep/debounce/releases/latest)
|
||||
|
||||
## Example
|
||||
|
||||
```go
|
||||
func ExampleNew() {
|
||||
var counter uint64
|
||||
|
||||
f := func() {
|
||||
atomic.AddUint64(&counter, 1)
|
||||
}
|
||||
|
||||
debounced := debounce.New(100 * time.Millisecond)
|
||||
|
||||
for i := 0; i < 3; i++ {
|
||||
for j := 0; j < 10; j++ {
|
||||
debounced(f)
|
||||
}
|
||||
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
}
|
||||
|
||||
c := int(atomic.LoadUint64(&counter))
|
||||
|
||||
fmt.Println("Counter is", c)
|
||||
// Output: Counter is 3
|
||||
}
|
||||
```
|
||||
|
43
vendor/github.com/bep/debounce/debounce.go
generated
vendored
Normal file
43
vendor/github.com/bep/debounce/debounce.go
generated
vendored
Normal file
|
@ -0,0 +1,43 @@
|
|||
// Copyright © 2019 Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>.
|
||||
//
|
||||
// Use of this source code is governed by an MIT-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package debounce provides a debouncer func. The most typical use case would be
|
||||
// the user typing a text into a form; the UI needs an update, but let's wait for
|
||||
// a break.
|
||||
package debounce
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// New returns a debounced function that takes another functions as its argument.
|
||||
// This function will be called when the debounced function stops being called
|
||||
// for the given duration.
|
||||
// The debounced function can be invoked with different functions, if needed,
|
||||
// the last one will win.
|
||||
func New(after time.Duration) func(f func()) {
|
||||
d := &debouncer{after: after}
|
||||
|
||||
return func(f func()) {
|
||||
d.add(f)
|
||||
}
|
||||
}
|
||||
|
||||
type debouncer struct {
|
||||
mu sync.Mutex
|
||||
after time.Duration
|
||||
timer *time.Timer
|
||||
}
|
||||
|
||||
func (d *debouncer) add(f func()) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
if d.timer != nil {
|
||||
d.timer.Stop()
|
||||
}
|
||||
d.timer = time.AfterFunc(d.after, f)
|
||||
}
|
22
vendor/github.com/cespare/xxhash/LICENSE.txt
generated
vendored
Normal file
22
vendor/github.com/cespare/xxhash/LICENSE.txt
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
|||
Copyright (c) 2016 Caleb Spare
|
||||
|
||||
MIT License
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining
|
||||
a copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be
|
||||
included in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
50
vendor/github.com/cespare/xxhash/README.md
generated
vendored
Normal file
50
vendor/github.com/cespare/xxhash/README.md
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
|||
# xxhash
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
|
||||
|
||||
xxhash is a Go implementation of the 64-bit
|
||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
||||
high-quality hashing algorithm that is much faster than anything in the Go
|
||||
standard library.
|
||||
|
||||
The API is very small, taking its cue from the other hashing packages in the
|
||||
standard library:
|
||||
|
||||
$ go doc github.com/cespare/xxhash !
|
||||
package xxhash // import "github.com/cespare/xxhash"
|
||||
|
||||
Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||
at http://cyan4973.github.io/xxHash/.
|
||||
|
||||
func New() hash.Hash64
|
||||
func Sum64(b []byte) uint64
|
||||
func Sum64String(s string) uint64
|
||||
|
||||
This implementation provides a fast pure-Go implementation and an even faster
|
||||
assembly implementation for amd64.
|
||||
|
||||
## Benchmarks
|
||||
|
||||
Here are some quick benchmarks comparing the pure-Go and assembly
|
||||
implementations of Sum64 against another popular Go XXH64 implementation,
|
||||
[github.com/OneOfOne/xxhash](https://github.com/OneOfOne/xxhash):
|
||||
|
||||
| input size | OneOfOne | cespare (purego) | cespare |
|
||||
| --- | --- | --- | --- |
|
||||
| 5 B | 416 MB/s | 720 MB/s | 872 MB/s |
|
||||
| 100 B | 3980 MB/s | 5013 MB/s | 5252 MB/s |
|
||||
| 4 KB | 12727 MB/s | 12999 MB/s | 13026 MB/s |
|
||||
| 10 MB | 9879 MB/s | 10775 MB/s | 10913 MB/s |
|
||||
|
||||
These numbers were generated with:
|
||||
|
||||
```
|
||||
$ go test -benchtime 10s -bench '/OneOfOne,'
|
||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,'
|
||||
$ go test -benchtime 10s -bench '/xxhash,'
|
||||
```
|
||||
|
||||
## Projects using this package
|
||||
|
||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
14
vendor/github.com/cespare/xxhash/rotate.go
generated
vendored
Normal file
14
vendor/github.com/cespare/xxhash/rotate.go
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
// +build !go1.9
|
||||
|
||||
package xxhash
|
||||
|
||||
// TODO(caleb): After Go 1.10 comes out, remove this fallback code.
|
||||
|
||||
func rol1(x uint64) uint64 { return (x << 1) | (x >> (64 - 1)) }
|
||||
func rol7(x uint64) uint64 { return (x << 7) | (x >> (64 - 7)) }
|
||||
func rol11(x uint64) uint64 { return (x << 11) | (x >> (64 - 11)) }
|
||||
func rol12(x uint64) uint64 { return (x << 12) | (x >> (64 - 12)) }
|
||||
func rol18(x uint64) uint64 { return (x << 18) | (x >> (64 - 18)) }
|
||||
func rol23(x uint64) uint64 { return (x << 23) | (x >> (64 - 23)) }
|
||||
func rol27(x uint64) uint64 { return (x << 27) | (x >> (64 - 27)) }
|
||||
func rol31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }
|
14
vendor/github.com/cespare/xxhash/rotate19.go
generated
vendored
Normal file
14
vendor/github.com/cespare/xxhash/rotate19.go
generated
vendored
Normal file
|
@ -0,0 +1,14 @@
|
|||
// +build go1.9
|
||||
|
||||
package xxhash
|
||||
|
||||
import "math/bits"
|
||||
|
||||
func rol1(x uint64) uint64 { return bits.RotateLeft64(x, 1) }
|
||||
func rol7(x uint64) uint64 { return bits.RotateLeft64(x, 7) }
|
||||
func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
|
||||
func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
|
||||
func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
|
||||
func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
|
||||
func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
|
||||
func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
|
168
vendor/github.com/cespare/xxhash/xxhash.go
generated
vendored
Normal file
168
vendor/github.com/cespare/xxhash/xxhash.go
generated
vendored
Normal file
|
@ -0,0 +1,168 @@
|
|||
// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
|
||||
// at http://cyan4973.github.io/xxHash/.
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"hash"
|
||||
)
|
||||
|
||||
const (
|
||||
prime1 uint64 = 11400714785074694791
|
||||
prime2 uint64 = 14029467366897019727
|
||||
prime3 uint64 = 1609587929392839161
|
||||
prime4 uint64 = 9650029242287828579
|
||||
prime5 uint64 = 2870177450012600261
|
||||
)
|
||||
|
||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
||||
// possible in the Go code is worth a small (but measurable) performance boost
|
||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
||||
// convenience in the Go code in a few places where we need to intentionally
|
||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
||||
// result overflows a uint64).
|
||||
var (
|
||||
prime1v = prime1
|
||||
prime2v = prime2
|
||||
prime3v = prime3
|
||||
prime4v = prime4
|
||||
prime5v = prime5
|
||||
)
|
||||
|
||||
type xxh struct {
|
||||
v1 uint64
|
||||
v2 uint64
|
||||
v3 uint64
|
||||
v4 uint64
|
||||
total int
|
||||
mem [32]byte
|
||||
n int // how much of mem is used
|
||||
}
|
||||
|
||||
// New creates a new hash.Hash64 that implements the 64-bit xxHash algorithm.
|
||||
func New() hash.Hash64 {
|
||||
var x xxh
|
||||
x.Reset()
|
||||
return &x
|
||||
}
|
||||
|
||||
func (x *xxh) Reset() {
|
||||
x.n = 0
|
||||
x.total = 0
|
||||
x.v1 = prime1v + prime2
|
||||
x.v2 = prime2
|
||||
x.v3 = 0
|
||||
x.v4 = -prime1v
|
||||
}
|
||||
|
||||
func (x *xxh) Size() int { return 8 }
|
||||
func (x *xxh) BlockSize() int { return 32 }
|
||||
|
||||
// Write adds more data to x. It always returns len(b), nil.
|
||||
func (x *xxh) Write(b []byte) (n int, err error) {
|
||||
n = len(b)
|
||||
x.total += len(b)
|
||||
|
||||
if x.n+len(b) < 32 {
|
||||
// This new data doesn't even fill the current block.
|
||||
copy(x.mem[x.n:], b)
|
||||
x.n += len(b)
|
||||
return
|
||||
}
|
||||
|
||||
if x.n > 0 {
|
||||
// Finish off the partial block.
|
||||
copy(x.mem[x.n:], b)
|
||||
x.v1 = round(x.v1, u64(x.mem[0:8]))
|
||||
x.v2 = round(x.v2, u64(x.mem[8:16]))
|
||||
x.v3 = round(x.v3, u64(x.mem[16:24]))
|
||||
x.v4 = round(x.v4, u64(x.mem[24:32]))
|
||||
b = b[32-x.n:]
|
||||
x.n = 0
|
||||
}
|
||||
|
||||
if len(b) >= 32 {
|
||||
// One or more full blocks left.
|
||||
b = writeBlocks(x, b)
|
||||
}
|
||||
|
||||
// Store any remaining partial block.
|
||||
copy(x.mem[:], b)
|
||||
x.n = len(b)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (x *xxh) Sum(b []byte) []byte {
|
||||
s := x.Sum64()
|
||||
return append(
|
||||
b,
|
||||
byte(s>>56),
|
||||
byte(s>>48),
|
||||
byte(s>>40),
|
||||
byte(s>>32),
|
||||
byte(s>>24),
|
||||
byte(s>>16),
|
||||
byte(s>>8),
|
||||
byte(s),
|
||||
)
|
||||
}
|
||||
|
||||
func (x *xxh) Sum64() uint64 {
|
||||
var h uint64
|
||||
|
||||
if x.total >= 32 {
|
||||
v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = x.v3 + prime5
|
||||
}
|
||||
|
||||
h += uint64(x.total)
|
||||
|
||||
i, end := 0, x.n
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(x.mem[i:i+8]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(x.mem[i:i+4])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
}
|
||||
for i < end {
|
||||
h ^= uint64(x.mem[i]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
i++
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
|
||||
func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
|
||||
|
||||
func round(acc, input uint64) uint64 {
|
||||
acc += input * prime2
|
||||
acc = rol31(acc)
|
||||
acc *= prime1
|
||||
return acc
|
||||
}
|
||||
|
||||
func mergeRound(acc, val uint64) uint64 {
|
||||
val = round(0, val)
|
||||
acc ^= val
|
||||
acc = acc*prime1 + prime4
|
||||
return acc
|
||||
}
|
12
vendor/github.com/cespare/xxhash/xxhash_amd64.go
generated
vendored
Normal file
12
vendor/github.com/cespare/xxhash/xxhash_amd64.go
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
|
|||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
//
|
||||
//go:noescape
|
||||
func Sum64(b []byte) uint64
|
||||
|
||||
func writeBlocks(x *xxh, b []byte) []byte
|
233
vendor/github.com/cespare/xxhash/xxhash_amd64.s
generated
vendored
Normal file
233
vendor/github.com/cespare/xxhash/xxhash_amd64.s
generated
vendored
Normal file
|
@ -0,0 +1,233 @@
|
|||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Register allocation:
|
||||
// AX h
|
||||
// CX pointer to advance through b
|
||||
// DX n
|
||||
// BX loop end
|
||||
// R8 v1, k1
|
||||
// R9 v2
|
||||
// R10 v3
|
||||
// R11 v4
|
||||
// R12 tmp
|
||||
// R13 prime1v
|
||||
// R14 prime2v
|
||||
// R15 prime4v
|
||||
|
||||
// round reads from and advances the buffer pointer in CX.
|
||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
||||
#define round(r) \
|
||||
MOVQ (CX), R12 \
|
||||
ADDQ $8, CX \
|
||||
IMULQ R14, R12 \
|
||||
ADDQ R12, r \
|
||||
ROLQ $31, r \
|
||||
IMULQ R13, r
|
||||
|
||||
// mergeRound applies a merge round on the two registers acc and val.
|
||||
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
|
||||
#define mergeRound(acc, val) \
|
||||
IMULQ R14, val \
|
||||
ROLQ $31, val \
|
||||
IMULQ R13, val \
|
||||
XORQ val, acc \
|
||||
IMULQ R13, acc \
|
||||
ADDQ R15, acc
|
||||
|
||||
// func Sum64(b []byte) uint64
|
||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
||||
// Load fixed primes.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
MOVQ ·prime4v(SB), R15
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+0(FP), CX
|
||||
MOVQ b_len+8(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
|
||||
// The first loop limit will be len(b)-32.
|
||||
SUBQ $32, BX
|
||||
|
||||
// Check whether we have at least one block.
|
||||
CMPQ DX, $32
|
||||
JLT noBlocks
|
||||
|
||||
// Set up initial state (v1, v2, v3, v4).
|
||||
MOVQ R13, R8
|
||||
ADDQ R14, R8
|
||||
MOVQ R14, R9
|
||||
XORQ R10, R10
|
||||
XORQ R11, R11
|
||||
SUBQ R13, R11
|
||||
|
||||
// Loop until CX > BX.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE blockLoop
|
||||
|
||||
MOVQ R8, AX
|
||||
ROLQ $1, AX
|
||||
MOVQ R9, R12
|
||||
ROLQ $7, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R10, R12
|
||||
ROLQ $12, R12
|
||||
ADDQ R12, AX
|
||||
MOVQ R11, R12
|
||||
ROLQ $18, R12
|
||||
ADDQ R12, AX
|
||||
|
||||
mergeRound(AX, R8)
|
||||
mergeRound(AX, R9)
|
||||
mergeRound(AX, R10)
|
||||
mergeRound(AX, R11)
|
||||
|
||||
JMP afterBlocks
|
||||
|
||||
noBlocks:
|
||||
MOVQ ·prime5v(SB), AX
|
||||
|
||||
afterBlocks:
|
||||
ADDQ DX, AX
|
||||
|
||||
// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
|
||||
ADDQ $24, BX
|
||||
|
||||
CMPQ CX, BX
|
||||
JG fourByte
|
||||
|
||||
wordLoop:
|
||||
// Calculate k1.
|
||||
MOVQ (CX), R8
|
||||
ADDQ $8, CX
|
||||
IMULQ R14, R8
|
||||
ROLQ $31, R8
|
||||
IMULQ R13, R8
|
||||
|
||||
XORQ R8, AX
|
||||
ROLQ $27, AX
|
||||
IMULQ R13, AX
|
||||
ADDQ R15, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE wordLoop
|
||||
|
||||
fourByte:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
JG singles
|
||||
|
||||
MOVL (CX), R8
|
||||
ADDQ $4, CX
|
||||
IMULQ R13, R8
|
||||
XORQ R8, AX
|
||||
|
||||
ROLQ $23, AX
|
||||
IMULQ R14, AX
|
||||
ADDQ ·prime3v(SB), AX
|
||||
|
||||
singles:
|
||||
ADDQ $4, BX
|
||||
CMPQ CX, BX
|
||||
JGE finalize
|
||||
|
||||
singlesLoop:
|
||||
MOVBQZX (CX), R12
|
||||
ADDQ $1, CX
|
||||
IMULQ ·prime5v(SB), R12
|
||||
XORQ R12, AX
|
||||
|
||||
ROLQ $11, AX
|
||||
IMULQ R13, AX
|
||||
|
||||
CMPQ CX, BX
|
||||
JL singlesLoop
|
||||
|
||||
finalize:
|
||||
MOVQ AX, R12
|
||||
SHRQ $33, R12
|
||||
XORQ R12, AX
|
||||
IMULQ R14, AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $29, R12
|
||||
XORQ R12, AX
|
||||
IMULQ ·prime3v(SB), AX
|
||||
MOVQ AX, R12
|
||||
SHRQ $32, R12
|
||||
XORQ R12, AX
|
||||
|
||||
MOVQ AX, ret+24(FP)
|
||||
RET
|
||||
|
||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
||||
// the x pointer.
|
||||
|
||||
// func writeBlocks(x *xxh, b []byte) []byte
|
||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-56
|
||||
// Load fixed primes needed for round.
|
||||
MOVQ ·prime1v(SB), R13
|
||||
MOVQ ·prime2v(SB), R14
|
||||
|
||||
// Load slice.
|
||||
MOVQ b_base+8(FP), CX
|
||||
MOVQ CX, ret_base+32(FP) // initialize return base pointer; see NOTE below
|
||||
MOVQ b_len+16(FP), DX
|
||||
LEAQ (CX)(DX*1), BX
|
||||
SUBQ $32, BX
|
||||
|
||||
// Load vN from x.
|
||||
MOVQ x+0(FP), AX
|
||||
MOVQ 0(AX), R8 // v1
|
||||
MOVQ 8(AX), R9 // v2
|
||||
MOVQ 16(AX), R10 // v3
|
||||
MOVQ 24(AX), R11 // v4
|
||||
|
||||
// We don't need to check the loop condition here; this function is
|
||||
// always called with at least one block of data to process.
|
||||
blockLoop:
|
||||
round(R8)
|
||||
round(R9)
|
||||
round(R10)
|
||||
round(R11)
|
||||
|
||||
CMPQ CX, BX
|
||||
JLE blockLoop
|
||||
|
||||
// Copy vN back to x.
|
||||
MOVQ R8, 0(AX)
|
||||
MOVQ R9, 8(AX)
|
||||
MOVQ R10, 16(AX)
|
||||
MOVQ R11, 24(AX)
|
||||
|
||||
// Construct return slice.
|
||||
// NOTE: It's important that we don't construct a slice that has a base
|
||||
// pointer off the end of the original slice, as in Go 1.7+ this will
|
||||
// cause runtime crashes. (See discussion in, for example,
|
||||
// https://github.com/golang/go/issues/16772.)
|
||||
// Therefore, we calculate the length/cap first, and if they're zero, we
|
||||
// keep the old base. This is what the compiler does as well if you
|
||||
// write code like
|
||||
// b = b[len(b):]
|
||||
|
||||
// New length is 32 - (CX - BX) -> BX+32 - CX.
|
||||
ADDQ $32, BX
|
||||
SUBQ CX, BX
|
||||
JZ afterSetBase
|
||||
|
||||
MOVQ CX, ret_base+32(FP)
|
||||
|
||||
afterSetBase:
|
||||
MOVQ BX, ret_len+40(FP)
|
||||
MOVQ BX, ret_cap+48(FP) // set cap == len
|
||||
|
||||
RET
|
75
vendor/github.com/cespare/xxhash/xxhash_other.go
generated
vendored
Normal file
75
vendor/github.com/cespare/xxhash/xxhash_other.go
generated
vendored
Normal file
|
@ -0,0 +1,75 @@
|
|||
// +build !amd64 appengine !gc purego
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64 computes the 64-bit xxHash digest of b.
|
||||
func Sum64(b []byte) uint64 {
|
||||
// A simpler version would be
|
||||
// x := New()
|
||||
// x.Write(b)
|
||||
// return x.Sum64()
|
||||
// but this is faster, particularly for small inputs.
|
||||
|
||||
n := len(b)
|
||||
var h uint64
|
||||
|
||||
if n >= 32 {
|
||||
v1 := prime1v + prime2
|
||||
v2 := prime2
|
||||
v3 := uint64(0)
|
||||
v4 := -prime1v
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
|
||||
h = mergeRound(h, v1)
|
||||
h = mergeRound(h, v2)
|
||||
h = mergeRound(h, v3)
|
||||
h = mergeRound(h, v4)
|
||||
} else {
|
||||
h = prime5
|
||||
}
|
||||
|
||||
h += uint64(n)
|
||||
|
||||
i, end := 0, len(b)
|
||||
for ; i+8 <= end; i += 8 {
|
||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
||||
h ^= k1
|
||||
h = rol27(h)*prime1 + prime4
|
||||
}
|
||||
if i+4 <= end {
|
||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
||||
h = rol23(h)*prime2 + prime3
|
||||
i += 4
|
||||
}
|
||||
for ; i < end; i++ {
|
||||
h ^= uint64(b[i]) * prime5
|
||||
h = rol11(h) * prime1
|
||||
}
|
||||
|
||||
h ^= h >> 33
|
||||
h *= prime2
|
||||
h ^= h >> 29
|
||||
h *= prime3
|
||||
h ^= h >> 32
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
func writeBlocks(x *xxh, b []byte) []byte {
|
||||
v1, v2, v3, v4 := x.v1, x.v2, x.v3, x.v4
|
||||
for len(b) >= 32 {
|
||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||
v3 = round(v3, u64(b[16:24:len(b)]))
|
||||
v4 = round(v4, u64(b[24:32:len(b)]))
|
||||
b = b[32:len(b):len(b)]
|
||||
}
|
||||
x.v1, x.v2, x.v3, x.v4 = v1, v2, v3, v4
|
||||
return b
|
||||
}
|
10
vendor/github.com/cespare/xxhash/xxhash_safe.go
generated
vendored
Normal file
10
vendor/github.com/cespare/xxhash/xxhash_safe.go
generated
vendored
Normal file
|
@ -0,0 +1,10 @@
|
|||
// +build appengine
|
||||
|
||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
||||
|
||||
package xxhash
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
func Sum64String(s string) uint64 {
|
||||
return Sum64([]byte(s))
|
||||
}
|
30
vendor/github.com/cespare/xxhash/xxhash_unsafe.go
generated
vendored
Normal file
30
vendor/github.com/cespare/xxhash/xxhash_unsafe.go
generated
vendored
Normal file
|
@ -0,0 +1,30 @@
|
|||
// +build !appengine
|
||||
|
||||
// This file encapsulates usage of unsafe.
|
||||
// xxhash_safe.go contains the safe implementations.
|
||||
|
||||
package xxhash
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Sum64String computes the 64-bit xxHash digest of s.
|
||||
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
|
||||
//
|
||||
// TODO(caleb): Consider removing this if an optimization is ever added to make
|
||||
// it unnecessary: https://golang.org/issue/2205.
|
||||
//
|
||||
// TODO(caleb): We still have a function call; we could instead write Go/asm
|
||||
// copies of Sum64 for strings to squeeze out a bit more speed.
|
||||
func Sum64String(s string) uint64 {
|
||||
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
|
||||
// for some discussion about this unsafe conversion.
|
||||
var b []byte
|
||||
bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
|
||||
bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
|
||||
bh.Len = len(s)
|
||||
bh.Cap = len(s)
|
||||
return Sum64(b)
|
||||
}
|
2
vendor/github.com/fiatjaf/khatru/README.md
generated
vendored
2
vendor/github.com/fiatjaf/khatru/README.md
generated
vendored
|
@ -73,7 +73,7 @@ func main() {
|
|||
policies.ValidateKind,
|
||||
|
||||
// define your own policies
|
||||
policies.PreventLargeTags(80),
|
||||
policies.PreventLargeTags(100),
|
||||
func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
|
||||
if event.PubKey == "fa984bd7dbb282f07e16e7ae87b26a2a7b9b90b7246a44771f0cf5ae58018f52" {
|
||||
return true, "we don't allow this person to write here"
|
||||
|
|
1
vendor/github.com/fiatjaf/khatru/get-started.go
generated
vendored
1
vendor/github.com/fiatjaf/khatru/get-started.go
generated
vendored
|
@ -53,6 +53,7 @@ func (rl *Relay) Shutdown(ctx context.Context) {
|
|||
defer rl.clientsMutex.Unlock()
|
||||
for ws := range rl.clients {
|
||||
ws.conn.WriteControl(websocket.CloseMessage, nil, time.Now().Add(time.Second))
|
||||
ws.cancel()
|
||||
ws.conn.Close()
|
||||
}
|
||||
clear(rl.clients)
|
||||
|
|
95
vendor/github.com/fiatjaf/khatru/handlers.go
generated
vendored
95
vendor/github.com/fiatjaf/khatru/handlers.go
generated
vendored
|
@ -10,9 +10,13 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/bep/debounce"
|
||||
"github.com/fasthttp/websocket"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip42"
|
||||
"github.com/nbd-wtf/go-nostr/nip77"
|
||||
"github.com/nbd-wtf/go-nostr/nip77/negentropy"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
"github.com/rs/cors"
|
||||
)
|
||||
|
||||
|
@ -57,7 +61,9 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
|
|||
conn: conn,
|
||||
Request: r,
|
||||
Challenge: hex.EncodeToString(challenge),
|
||||
negentropySessions: xsync.NewMapOf[string, *NegentropySession](),
|
||||
}
|
||||
ws.Context, ws.cancel = context.WithCancel(context.Background())
|
||||
|
||||
rl.clientsMutex.Lock()
|
||||
rl.clients[ws] = make([]listenerSpec, 0, 2)
|
||||
|
@ -77,7 +83,8 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
|
|||
|
||||
ticker.Stop()
|
||||
cancel()
|
||||
conn.Close()
|
||||
ws.cancel()
|
||||
ws.conn.Close()
|
||||
|
||||
rl.removeClientAndListeners(ws)
|
||||
}
|
||||
|
@ -85,10 +92,10 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
|
|||
go func() {
|
||||
defer kill()
|
||||
|
||||
conn.SetReadLimit(rl.MaxMessageSize)
|
||||
conn.SetReadDeadline(time.Now().Add(rl.PongWait))
|
||||
conn.SetPongHandler(func(string) error {
|
||||
conn.SetReadDeadline(time.Now().Add(rl.PongWait))
|
||||
ws.conn.SetReadLimit(rl.MaxMessageSize)
|
||||
ws.conn.SetReadDeadline(time.Now().Add(rl.PongWait))
|
||||
ws.conn.SetPongHandler(func(string) error {
|
||||
ws.conn.SetReadDeadline(time.Now().Add(rl.PongWait))
|
||||
return nil
|
||||
})
|
||||
|
||||
|
@ -97,7 +104,7 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
for {
|
||||
typ, message, err := conn.ReadMessage()
|
||||
typ, message, err := ws.conn.ReadMessage()
|
||||
if err != nil {
|
||||
if websocket.IsUnexpectedCloseError(
|
||||
err,
|
||||
|
@ -109,6 +116,7 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
|
|||
) {
|
||||
rl.Log.Printf("unexpected close error from %s: %v\n", r.Header.Get("X-Forwarded-For"), err)
|
||||
}
|
||||
ws.cancel()
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -120,9 +128,15 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
|
|||
go func(message []byte) {
|
||||
envelope := nostr.ParseMessage(message)
|
||||
if envelope == nil {
|
||||
if !rl.Negentropy {
|
||||
// stop silently
|
||||
return
|
||||
}
|
||||
envelope = nip77.ParseNegMessage(message)
|
||||
if envelope == nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
switch env := envelope.(type) {
|
||||
case *nostr.EventEnvelope:
|
||||
|
@ -269,6 +283,75 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
|
|||
} else {
|
||||
ws.WriteJSON(nostr.OKEnvelope{EventID: env.Event.ID, OK: false, Reason: "error: failed to authenticate"})
|
||||
}
|
||||
case *nip77.OpenEnvelope:
|
||||
srl := rl
|
||||
if rl.getSubRelayFromFilter != nil {
|
||||
srl = rl.getSubRelayFromFilter(env.Filter)
|
||||
if !srl.Negentropy {
|
||||
// ignore
|
||||
return
|
||||
}
|
||||
}
|
||||
vec, err := srl.startNegentropySession(ctx, env.Filter)
|
||||
if err != nil {
|
||||
// fail everything if any filter is rejected
|
||||
reason := err.Error()
|
||||
if strings.HasPrefix(reason, "auth-required:") {
|
||||
RequestAuth(ctx)
|
||||
}
|
||||
ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: reason})
|
||||
return
|
||||
}
|
||||
|
||||
// reconcile to get the next message and return it
|
||||
neg := negentropy.New(vec, 1024*1024)
|
||||
out, err := neg.Reconcile(env.Message)
|
||||
if err != nil {
|
||||
ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: err.Error()})
|
||||
return
|
||||
}
|
||||
ws.WriteJSON(nip77.MessageEnvelope{SubscriptionID: env.SubscriptionID, Message: out})
|
||||
|
||||
// if the message is not empty that means we'll probably have more reconciliation sessions, so store this
|
||||
if out != "" {
|
||||
deb := debounce.New(time.Second * 7)
|
||||
negSession := &NegentropySession{
|
||||
neg: neg,
|
||||
postponeClose: func() {
|
||||
deb(func() {
|
||||
ws.negentropySessions.Delete(env.SubscriptionID)
|
||||
})
|
||||
},
|
||||
}
|
||||
negSession.postponeClose()
|
||||
|
||||
ws.negentropySessions.Store(env.SubscriptionID, negSession)
|
||||
}
|
||||
case *nip77.MessageEnvelope:
|
||||
negSession, ok := ws.negentropySessions.Load(env.SubscriptionID)
|
||||
if !ok {
|
||||
// bad luck, your request was destroyed
|
||||
ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: "CLOSED"})
|
||||
return
|
||||
}
|
||||
// reconcile to get the next message and return it
|
||||
out, err := negSession.neg.Reconcile(env.Message)
|
||||
if err != nil {
|
||||
ws.WriteJSON(nip77.ErrorEnvelope{SubscriptionID: env.SubscriptionID, Reason: err.Error()})
|
||||
ws.negentropySessions.Delete(env.SubscriptionID)
|
||||
return
|
||||
}
|
||||
ws.WriteJSON(nip77.MessageEnvelope{SubscriptionID: env.SubscriptionID, Message: out})
|
||||
|
||||
// if there is more reconciliation to do, postpone this
|
||||
if out != "" {
|
||||
negSession.postponeClose()
|
||||
} else {
|
||||
// otherwise we can just close it
|
||||
ws.negentropySessions.Delete(env.SubscriptionID)
|
||||
}
|
||||
case *nip77.CloseEnvelope:
|
||||
ws.negentropySessions.Delete(env.SubscriptionID)
|
||||
}
|
||||
}(message)
|
||||
}
|
||||
|
|
50
vendor/github.com/fiatjaf/khatru/negentropy.go
generated
vendored
Normal file
50
vendor/github.com/fiatjaf/khatru/negentropy.go
generated
vendored
Normal file
|
@ -0,0 +1,50 @@
|
|||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip77/negentropy"
|
||||
"github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector"
|
||||
)
|
||||
|
||||
type NegentropySession struct {
|
||||
neg *negentropy.Negentropy
|
||||
postponeClose func()
|
||||
}
|
||||
|
||||
func (rl *Relay) startNegentropySession(ctx context.Context, filter nostr.Filter) (*vector.Vector, error) {
|
||||
// do the same overwrite/reject flow we do in normal REQs
|
||||
for _, ovw := range rl.OverwriteFilter {
|
||||
ovw(ctx, &filter)
|
||||
}
|
||||
if filter.LimitZero {
|
||||
return nil, fmt.Errorf("invalid limit 0")
|
||||
}
|
||||
for _, reject := range rl.RejectFilter {
|
||||
if reject, msg := reject(ctx, filter); reject {
|
||||
return nil, errors.New(nostr.NormalizeOKMessage(msg, "blocked"))
|
||||
}
|
||||
}
|
||||
|
||||
// fetch events and add them to a negentropy Vector store
|
||||
vec := vector.New()
|
||||
for _, query := range rl.QueryEvents {
|
||||
ch, err := query(ctx, filter)
|
||||
if err != nil {
|
||||
continue
|
||||
} else if ch == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for event := range ch {
|
||||
// since the goal here is to sync databases we won't do fancy stuff like overwrite events
|
||||
vec.Insert(event.CreatedAt, event.ID)
|
||||
}
|
||||
}
|
||||
vec.Seal()
|
||||
|
||||
return vec, nil
|
||||
}
|
7
vendor/github.com/fiatjaf/khatru/nip11.go
generated
vendored
7
vendor/github.com/fiatjaf/khatru/nip11.go
generated
vendored
|
@ -11,10 +11,13 @@ func (rl *Relay) HandleNIP11(w http.ResponseWriter, r *http.Request) {
|
|||
info := *rl.Info
|
||||
|
||||
if len(rl.DeleteEvent) > 0 {
|
||||
info.SupportedNIPs = append(info.SupportedNIPs, 9)
|
||||
info.AddSupportedNIP(9)
|
||||
}
|
||||
if len(rl.CountEvents) > 0 {
|
||||
info.SupportedNIPs = append(info.SupportedNIPs, 45)
|
||||
info.AddSupportedNIP(45)
|
||||
}
|
||||
if rl.Negentropy {
|
||||
info.AddSupportedNIP(77)
|
||||
}
|
||||
|
||||
for _, ovw := range rl.OverwriteRelayInformation {
|
||||
|
|
6
vendor/github.com/fiatjaf/khatru/policies/events.go
generated
vendored
6
vendor/github.com/fiatjaf/khatru/policies/events.go
generated
vendored
|
@ -67,11 +67,15 @@ func PreventLargeTags(maxTagValueLen int) func(context.Context, *nostr.Event) (b
|
|||
|
||||
// RestrictToSpecifiedKinds returns a function that can be used as a RejectFilter that will reject
|
||||
// any events with kinds different than the specified ones.
|
||||
func RestrictToSpecifiedKinds(kinds ...uint16) func(context.Context, *nostr.Event) (bool, string) {
|
||||
func RestrictToSpecifiedKinds(allowEphemeral bool, kinds ...uint16) func(context.Context, *nostr.Event) (bool, string) {
|
||||
// sort the kinds in increasing order
|
||||
slices.Sort(kinds)
|
||||
|
||||
return func(ctx context.Context, event *nostr.Event) (reject bool, msg string) {
|
||||
if allowEphemeral && event.IsEphemeral() {
|
||||
return false, ""
|
||||
}
|
||||
|
||||
if _, allowed := slices.BinarySearch(kinds, uint16(event.Kind)); allowed {
|
||||
return false, ""
|
||||
}
|
||||
|
|
5
vendor/github.com/fiatjaf/khatru/relay.go
generated
vendored
5
vendor/github.com/fiatjaf/khatru/relay.go
generated
vendored
|
@ -46,7 +46,7 @@ func NewRelay() *Relay {
|
|||
type Relay struct {
|
||||
ServiceURL string
|
||||
|
||||
// these structs keeps track of all the things that can be customized when handling events or requests
|
||||
// hooks that will be called at various times
|
||||
RejectEvent []func(ctx context.Context, event *nostr.Event) (reject bool, msg string)
|
||||
OverwriteDeletionOutcome []func(ctx context.Context, target *nostr.Event, deletion *nostr.Event) (acceptDeletion bool, msg string)
|
||||
StoreEvent []func(ctx context.Context, event *nostr.Event) error
|
||||
|
@ -90,6 +90,9 @@ type Relay struct {
|
|||
listeners []listener
|
||||
clientsMutex sync.Mutex
|
||||
|
||||
// set this to true to support negentropy
|
||||
Negentropy bool
|
||||
|
||||
// in case you call Server.Start
|
||||
Addr string
|
||||
serveMux *http.ServeMux
|
||||
|
|
9
vendor/github.com/fiatjaf/khatru/websocket.go
generated
vendored
9
vendor/github.com/fiatjaf/khatru/websocket.go
generated
vendored
|
@ -1,10 +1,12 @@
|
|||
package khatru
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
||||
"github.com/fasthttp/websocket"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
type WebSocket struct {
|
||||
|
@ -14,11 +16,18 @@ type WebSocket struct {
|
|||
// original request
|
||||
Request *http.Request
|
||||
|
||||
// this Context will be canceled whenever the connection is closed from the client side or server-side.
|
||||
Context context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
// nip42
|
||||
Challenge string
|
||||
AuthedPublicKey string
|
||||
Authed chan struct{}
|
||||
|
||||
// nip77
|
||||
negentropySessions *xsync.MapOf[string, *NegentropySession]
|
||||
|
||||
authLock sync.Mutex
|
||||
}
|
||||
|
||||
|
|
2
vendor/github.com/greatroar/blobloom/.gitattributes
generated
vendored
Normal file
2
vendor/github.com/greatroar/blobloom/.gitattributes
generated
vendored
Normal file
|
@ -0,0 +1,2 @@
|
|||
# Work around https://github.com/golang/go/issues/52268.
|
||||
**/testdata/fuzz/*/* eol=lf
|
25
vendor/github.com/greatroar/blobloom/.golangci.yml
generated
vendored
Normal file
25
vendor/github.com/greatroar/blobloom/.golangci.yml
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
|||
# Configuration for golangci-lint.
|
||||
|
||||
linters:
|
||||
disable:
|
||||
- asciicheck
|
||||
enable:
|
||||
- gocognit
|
||||
- gocyclo
|
||||
- godot
|
||||
- gofumpt
|
||||
- lll
|
||||
- misspell
|
||||
- nakedret
|
||||
- thelper
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
- path: _test\.go
|
||||
linters:
|
||||
errcheck
|
||||
|
||||
linters-settings:
|
||||
govet:
|
||||
enable:
|
||||
- atomicalign
|
202
vendor/github.com/greatroar/blobloom/LICENSE
generated
vendored
Normal file
202
vendor/github.com/greatroar/blobloom/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
86
vendor/github.com/greatroar/blobloom/README.md
generated
vendored
Normal file
86
vendor/github.com/greatroar/blobloom/README.md
generated
vendored
Normal file
|
@ -0,0 +1,86 @@
|
|||
Blobloom
|
||||
========
|
||||
|
||||
A Bloom filter package for Go (golang) with no compile-time dependencies.
|
||||
|
||||
This package implements a version of Bloom filters called [blocked Bloom filters](
|
||||
https://algo2.iti.kit.edu/documents/cacheefficientbloomfilters-jea.pdf),
|
||||
which get a speed boost from using the CPU cache more efficiently
|
||||
than regular Bloom filters.
|
||||
|
||||
Unlike most Bloom filter packages for Go,
|
||||
this one doesn't run a hash function for you.
|
||||
That's a benefit if you need a custom hash
|
||||
or you want pick the fastest one for an application.
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
To construct a Bloom filter, you need to know how many keys you want to store
|
||||
and what rate of false positives you find acceptable.
|
||||
|
||||
f := blobloom.NewOptimized(blobloom.Config{
|
||||
Capacity: nkeys, // Expected number of keys.
|
||||
FPRate: 1e-4, // Accept one false positive per 10,000 lookups.
|
||||
})
|
||||
|
||||
To add a key:
|
||||
|
||||
// import "github.com/cespare/xxhash/v2"
|
||||
f.Add(xxhash.Sum64(key))
|
||||
|
||||
To test for the presence of a key in the filter:
|
||||
|
||||
if f.Has(xxhash.Sum64(key)) {
|
||||
// Key is probably in f.
|
||||
} else {
|
||||
// Key is certainly not in f.
|
||||
}
|
||||
|
||||
The false positive rate is defined as usual:
|
||||
if you look up 10,000 random keys in a Bloom filter filled to capacity,
|
||||
an expected one of those is a false positive for FPRate 1e-4.
|
||||
|
||||
See the examples/ directory and the
|
||||
[package documentation](https://pkg.go.dev/github.com/greatroar/blobloom)
|
||||
for further usage information and examples.
|
||||
|
||||
Hash functions
|
||||
--------------
|
||||
|
||||
Blobloom does not provide hash functions. Instead, it requires client code to
|
||||
represent each key as a single 64-bit hash value, leaving it to the user to
|
||||
pick the right hash function for a particular problem. Here are some general
|
||||
suggestions:
|
||||
|
||||
* If you use Bloom filters to speed up access to a key-value store, you might
|
||||
want to look at [xxh3](https://github.com/zeebo/xxh3) or [xxhash](
|
||||
https://github.com/cespare/xxhash).
|
||||
* If your keys are cryptographic hashes, consider using the first 8 bytes of those hashes.
|
||||
* If you use Bloom filters to make probabilistic decisions, a randomized hash
|
||||
function such as [maphash](https://golang.org/pkg/hash/maphash) should prevent
|
||||
the same false positives occurring every time.
|
||||
|
||||
When evaluating a hash function, or designing a custom one,
|
||||
make sure it is a 64-bit hash that properly mixes its input bits.
|
||||
Casting a 32-bit hash to uint64 gives suboptimal results.
|
||||
So does passing integer keys in without running them through a mixing function.
|
||||
|
||||
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
Copyright © 2020-2023 the Blobloom authors
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
279
vendor/github.com/greatroar/blobloom/bloomfilter.go
generated
vendored
Normal file
279
vendor/github.com/greatroar/blobloom/bloomfilter.go
generated
vendored
Normal file
|
@ -0,0 +1,279 @@
|
|||
// Copyright 2020-2022 the Blobloom authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package blobloom implements blocked Bloom filters.
|
||||
//
|
||||
// Blocked Bloom filters are an approximate set data structure: if a key has
|
||||
// been added to a filter, a lookup of that key returns true, but if the key
|
||||
// has not been added, there is a non-zero probability that the lookup still
|
||||
// returns true (a false positive). False negatives are impossible: if the
|
||||
// lookup for a key returns false, that key has not been added.
|
||||
//
|
||||
// In this package, keys are represented exclusively as hashes. Client code
|
||||
// is responsible for supplying a 64-bit hash value.
|
||||
//
|
||||
// Compared to standard Bloom filters, blocked Bloom filters use the CPU
|
||||
// cache more efficiently. A blocked Bloom filter is an array of ordinary
|
||||
// Bloom filters of fixed size BlockBits (the blocks). The lower half of the
|
||||
// hash selects the block to use.
|
||||
//
|
||||
// To achieve the same false positive rate (FPR) as a standard Bloom filter,
|
||||
// a blocked Bloom filter requires more memory. For an FPR of at most 2e-6
|
||||
// (two in a million), it uses ~20% more memory. At 1e-10, the space required
|
||||
// is double that of standard Bloom filter.
|
||||
//
|
||||
// For more details, see the 2010 paper by Putze, Sanders and Singler,
|
||||
// https://algo2.iti.kit.edu/documents/cacheefficientbloomfilters-jea.pdf.
|
||||
package blobloom
|
||||
|
||||
import "math"
|
||||
|
||||
// BlockBits is the number of bits per block and the minimum number of bits
|
||||
// in a Filter.
|
||||
//
|
||||
// The value of this constant is chosen to match the L1 cache line size
|
||||
// of popular architectures (386, amd64, arm64).
|
||||
const BlockBits = 512
|
||||
|
||||
// MaxBits is the maximum number of bits supported by a Filter.
|
||||
const MaxBits = BlockBits << 32 // 256GiB.
|
||||
|
||||
// A Filter is a blocked Bloom filter.
|
||||
type Filter struct {
|
||||
b []block // Shards.
|
||||
k int // Number of hash functions required.
|
||||
}
|
||||
|
||||
// New constructs a Bloom filter with given numbers of bits and hash functions.
|
||||
//
|
||||
// The number of bits should be at least BlockBits; smaller values are silently
|
||||
// increased.
|
||||
//
|
||||
// The number of hashes reflects the number of hashes synthesized from the
|
||||
// single hash passed in by the client. It is silently increased to two if
|
||||
// a lower value is given.
|
||||
func New(nbits uint64, nhashes int) *Filter {
|
||||
nbits, nhashes = fixBitsAndHashes(nbits, nhashes)
|
||||
|
||||
return &Filter{
|
||||
b: make([]block, nbits/BlockBits),
|
||||
k: nhashes,
|
||||
}
|
||||
}
|
||||
|
||||
func fixBitsAndHashes(nbits uint64, nhashes int) (uint64, int) {
|
||||
if nbits < 1 {
|
||||
nbits = BlockBits
|
||||
}
|
||||
if nhashes < 2 {
|
||||
nhashes = 2
|
||||
}
|
||||
if nbits > MaxBits {
|
||||
panic("nbits exceeds MaxBits")
|
||||
}
|
||||
|
||||
// Round nbits up to a multiple of BlockBits.
|
||||
if nbits%BlockBits != 0 {
|
||||
nbits += BlockBits - nbits%BlockBits
|
||||
}
|
||||
|
||||
return nbits, nhashes
|
||||
}
|
||||
|
||||
// Add insert a key with hash value h into f.
|
||||
func (f *Filter) Add(h uint64) {
|
||||
h1, h2 := uint32(h>>32), uint32(h)
|
||||
b := getblock(f.b, h2)
|
||||
|
||||
for i := 1; i < f.k; i++ {
|
||||
h1, h2 = doublehash(h1, h2, i)
|
||||
b.setbit(h1)
|
||||
}
|
||||
}
|
||||
|
||||
// log(1 - 1/BlockBits) computed with 128 bits precision.
|
||||
// Note that this is extremely close to -1/BlockBits,
|
||||
// which is what Wikipedia would have us use:
|
||||
// https://en.wikipedia.org/wiki/Bloom_filter#Approximating_the_number_of_items_in_a_Bloom_filter.
|
||||
const log1minus1divBlockbits = -0.0019550348358033505576274922418668121377
|
||||
|
||||
// Cardinality estimates the number of distinct keys added to f.
|
||||
//
|
||||
// The estimate is most reliable when f is filled to roughly its capacity.
|
||||
// It gets worse as f gets more densely filled. When one of the blocks is
|
||||
// entirely filled, the estimate becomes +Inf.
|
||||
//
|
||||
// The return value is the maximum likelihood estimate of Papapetrou, Siberski
|
||||
// and Nejdl, summed over the blocks
|
||||
// (https://www.win.tue.nl/~opapapetrou/papers/Bloomfilters-DAPD.pdf).
|
||||
func (f *Filter) Cardinality() float64 {
|
||||
return cardinality(f.k, f.b, onescount)
|
||||
}
|
||||
|
||||
func cardinality(nhashes int, b []block, onescount func(*block) int) float64 {
|
||||
k := float64(nhashes - 1)
|
||||
|
||||
// The probability of some bit not being set in a single insertion is
|
||||
// p0 = (1-1/BlockBits)^k.
|
||||
//
|
||||
// logProb0Inv = 1 / log(p0) = 1 / (k*log(1-1/BlockBits)).
|
||||
logProb0Inv := 1 / (k * log1minus1divBlockbits)
|
||||
|
||||
var n float64
|
||||
for i := range b {
|
||||
ones := onescount(&b[i])
|
||||
if ones == 0 {
|
||||
continue
|
||||
}
|
||||
n += math.Log1p(-float64(ones) / BlockBits)
|
||||
}
|
||||
return n * logProb0Inv
|
||||
}
|
||||
|
||||
// Clear resets f to its empty state.
|
||||
func (f *Filter) Clear() {
|
||||
for i := 0; i < len(f.b); i++ {
|
||||
f.b[i] = block{}
|
||||
}
|
||||
}
|
||||
|
||||
// Empty reports whether f contains no keys.
|
||||
func (f *Filter) Empty() bool {
|
||||
for i := 0; i < len(f.b); i++ {
|
||||
if f.b[i] != (block{}) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Equals returns true if f and g contain the same keys (in terms of Has)
|
||||
// when used with the same hash function.
|
||||
func (f *Filter) Equals(g *Filter) bool {
|
||||
if g.k != f.k || len(g.b) != len(f.b) {
|
||||
return false
|
||||
}
|
||||
for i := range g.b {
|
||||
if f.b[i] != g.b[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Fill set f to a completely full filter.
|
||||
// After Fill, Has returns true for any key.
|
||||
func (f *Filter) Fill() {
|
||||
for i := 0; i < len(f.b); i++ {
|
||||
for j := 0; j < blockWords; j++ {
|
||||
f.b[i][j] = ^uint32(0)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Has reports whether a key with hash value h has been added.
|
||||
// It may return a false positive.
|
||||
func (f *Filter) Has(h uint64) bool {
|
||||
h1, h2 := uint32(h>>32), uint32(h)
|
||||
b := getblock(f.b, h2)
|
||||
|
||||
for i := 1; i < f.k; i++ {
|
||||
h1, h2 = doublehash(h1, h2, i)
|
||||
if !b.getbit(h1) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// doublehash generates the hash values to use in iteration i of
// enhanced double hashing from the values h1, h2 of the previous iteration.
// See https://www.ccs.neu.edu/home/pete/pub/bloom-filters-verification.pdf.
func doublehash(h1, h2 uint32, i int) (uint32, uint32) {
	return h1 + h2, h2 + uint32(i)
}
|
||||
|
||||
// NumBits returns the number of bits of f.
|
||||
func (f *Filter) NumBits() uint64 {
|
||||
return BlockBits * uint64(len(f.b))
|
||||
}
|
||||
|
||||
func checkBinop(f, g *Filter) {
|
||||
if len(f.b) != len(g.b) {
|
||||
panic("Bloom filters do not have the same number of bits")
|
||||
}
|
||||
if f.k != g.k {
|
||||
panic("Bloom filters do not have the same number of hash functions")
|
||||
}
|
||||
}
|
||||
|
||||
// Intersect sets f to the intersection of f and g.
|
||||
//
|
||||
// Intersect panics when f and g do not have the same number of bits and
|
||||
// hash functions. Both Filters must be using the same hash function(s),
|
||||
// but Intersect cannot check this.
|
||||
//
|
||||
// Since Bloom filters may return false positives, Has may return true for
|
||||
// a key that was not in both f and g.
|
||||
//
|
||||
// After Intersect, the estimates from f.Cardinality and f.FPRate should be
|
||||
// considered unreliable.
|
||||
func (f *Filter) Intersect(g *Filter) {
|
||||
checkBinop(f, g)
|
||||
f.intersect(g)
|
||||
}
|
||||
|
||||
// Union sets f to the union of f and g.
|
||||
//
|
||||
// Union panics when f and g do not have the same number of bits and
|
||||
// hash functions. Both Filters must be using the same hash function(s),
|
||||
// but Union cannot check this.
|
||||
func (f *Filter) Union(g *Filter) {
|
||||
checkBinop(f, g)
|
||||
f.union(g)
|
||||
}
|
||||
|
||||
const (
|
||||
wordSize = 32
|
||||
blockWords = BlockBits / wordSize
|
||||
)
|
||||
|
||||
// A block is a fixed-size Bloom filter, used as a shard of a Filter.
|
||||
type block [blockWords]uint32
|
||||
|
||||
func getblock(b []block, h2 uint32) *block {
|
||||
i := reducerange(h2, uint32(len(b)))
|
||||
return &b[i]
|
||||
}
|
||||
|
||||
// reducerange maps i to an integer in the range [0,n) without a modulo.
// https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
func reducerange(i, n uint32) uint32 {
	return uint32(uint64(i) * uint64(n) >> 32)
}
|
||||
|
||||
// getbit reports whether bit (i modulo BlockBits) is set.
|
||||
func (b *block) getbit(i uint32) bool {
|
||||
bit := uint32(1) << (i % wordSize)
|
||||
x := (*b)[(i/wordSize)%blockWords] & bit
|
||||
return x != 0
|
||||
}
|
||||
|
||||
// setbit sets bit (i modulo BlockBits) of b.
|
||||
func (b *block) setbit(i uint32) {
|
||||
bit := uint32(1) << (i % wordSize)
|
||||
(*b)[(i/wordSize)%blockWords] |= bit
|
||||
}
|
246
vendor/github.com/greatroar/blobloom/io.go
generated
vendored
Normal file
246
vendor/github.com/greatroar/blobloom/io.go
generated
vendored
Normal file
|
@ -0,0 +1,246 @@
|
|||
// Copyright 2023 the Blobloom authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package blobloom
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
const maxCommentLen = 44
|
||||
|
||||
// Dump writes f to w, with an optional comment string, in the binary format
|
||||
// that a Loader accepts. It returns the number of bytes written to w.
|
||||
//
|
||||
// The comment may contain arbitrary data, within the limits layed out by the
|
||||
// format description. It can be used to record the hash function to be used
|
||||
// with a Filter.
|
||||
func Dump(w io.Writer, f *Filter, comment string) (int64, error) {
|
||||
return dump(w, f.b, f.k, comment)
|
||||
}
|
||||
|
||||
// DumpSync is like Dump, but for SyncFilters.
|
||||
//
|
||||
// If other goroutines are simultaneously modifying f,
|
||||
// their modifications may not be reflected in the dump.
|
||||
// Separate synchronization is required to prevent this.
|
||||
//
|
||||
// The format produced is the same as Dump's. The fact that
|
||||
// the argument is a SyncFilter is not encoded in the dump.
|
||||
func DumpSync(w io.Writer, f *SyncFilter, comment string) (n int64, err error) {
|
||||
return dump(w, f.b, f.k, comment)
|
||||
}
|
||||
|
||||
func dump(w io.Writer, b []block, nhashes int, comment string) (n int64, err error) {
|
||||
switch {
|
||||
case len(b) == 0 || nhashes == 0:
|
||||
err = errors.New("blobloom: won't dump uninitialized Filter")
|
||||
case len(comment) > maxCommentLen:
|
||||
err = fmt.Errorf("blobloom: comment of length %d too long", len(comment))
|
||||
case strings.IndexByte(comment, 0) != -1:
|
||||
err = fmt.Errorf("blobloom: comment %q contains zero byte", len(comment))
|
||||
}
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var buf [64]byte
|
||||
copy(buf[:8], "blobloom")
|
||||
// As documented in the comment for Loader, we store one less than the
|
||||
// number of blocks. This way, we can use the otherwise invalid value 0
|
||||
// and store 2³² blocks instead of at most 2³²-1.
|
||||
binary.LittleEndian.PutUint32(buf[12:], uint32(len(b)-1))
|
||||
binary.LittleEndian.PutUint32(buf[16:], uint32(nhashes))
|
||||
copy(buf[20:], comment)
|
||||
|
||||
k, err := w.Write(buf[:])
|
||||
n = int64(k)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
for i := range b {
|
||||
for j := range b[i] {
|
||||
x := atomic.LoadUint32(&b[i][j])
|
||||
binary.LittleEndian.PutUint32(buf[4*j:], x)
|
||||
}
|
||||
k, err = w.Write(buf[:])
|
||||
n += int64(k)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// A Loader reads a Filter or SyncFilter from an io.Reader.
|
||||
//
|
||||
// A Loader accepts the binary format produced by Dump. The format starts
|
||||
// with a 64-byte header:
|
||||
// - the string "blobloom", in ASCII;
|
||||
// - a four-byte version number, which must be zero;
|
||||
// - the number of Bloom filter blocks, minus one, as a 32-bit integer;
|
||||
// - the number of hashes, as a 32-bit integer;
|
||||
// - a comment of at most 44 non-zero bytes, padded to 44 bytes with zeros.
|
||||
//
|
||||
// After the header come the 512-bit blocks, divided into sixteen 32-bit limbs.
|
||||
// All integers are little-endian.
|
||||
type Loader struct {
|
||||
buf [64]byte
|
||||
r io.Reader
|
||||
err error
|
||||
|
||||
Comment string // Comment field. Filled in by NewLoader.
|
||||
nblocks uint64
|
||||
nhashes int
|
||||
}
|
||||
|
||||
// NewLoader parses the format header from r and returns a Loader
|
||||
// that can be used to load a Filter from it.
|
||||
func NewLoader(r io.Reader) (*Loader, error) {
|
||||
l := &Loader{r: r}
|
||||
|
||||
err := l.fillbuf()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
version := binary.LittleEndian.Uint32(l.buf[8:])
|
||||
// See comment in dump for the +1.
|
||||
l.nblocks = 1 + uint64(binary.LittleEndian.Uint32(l.buf[12:]))
|
||||
l.nhashes = int(binary.LittleEndian.Uint32(l.buf[16:]))
|
||||
comment := l.buf[20:]
|
||||
|
||||
switch {
|
||||
case string(l.buf[:8]) != "blobloom":
|
||||
err = errors.New("blobloom: not a Bloom filter dump")
|
||||
case version != 0:
|
||||
err = errors.New("blobloom: unsupported dump version")
|
||||
case l.nhashes == 0:
|
||||
err = errors.New("blobloom: zero hashes in Bloom filter dump")
|
||||
}
|
||||
if err == nil {
|
||||
comment, err = checkComment(comment)
|
||||
l.Comment = string(comment)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
l = nil
|
||||
}
|
||||
return l, err
|
||||
}
|
||||
|
||||
// Load sets f to the union of f and the Loader's filter, then returns f.
|
||||
// If f is nil, a new Filter of the appropriate size is constructed.
|
||||
//
|
||||
// If f is not nil and an error occurs while reading from the Loader,
|
||||
// f may end up in an inconsistent state.
|
||||
func (l *Loader) Load(f *Filter) (*Filter, error) {
|
||||
if f == nil {
|
||||
nbits := BlockBits * l.nblocks
|
||||
if nbits > MaxBits {
|
||||
return nil, fmt.Errorf("blobloom: %d blocks is too large", l.nblocks)
|
||||
}
|
||||
f = New(nbits, int(l.nhashes))
|
||||
} else if err := l.checkBitsAndHashes(len(f.b), f.k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := range f.b {
|
||||
if err := l.fillbuf(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for j := range f.b[i] {
|
||||
f.b[i][j] |= binary.LittleEndian.Uint32(l.buf[4*j:])
|
||||
}
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Load sets f to the union of f and the Loader's filter, then returns f.
|
||||
// If f is nil, a new SyncFilter of the appropriate size is constructed.
|
||||
// Else, LoadSync may run concurrently with other modifications to f.
|
||||
//
|
||||
// If f is not nil and an error occurs while reading from the Loader,
|
||||
// f may end up in an inconsistent state.
|
||||
func (l *Loader) LoadSync(f *SyncFilter) (*SyncFilter, error) {
|
||||
if f == nil {
|
||||
nbits := BlockBits * l.nblocks
|
||||
if nbits > MaxBits {
|
||||
return nil, fmt.Errorf("blobloom: %d blocks is too large", l.nblocks)
|
||||
}
|
||||
f = NewSync(nbits, int(l.nhashes))
|
||||
} else if err := l.checkBitsAndHashes(len(f.b), f.k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for i := range f.b {
|
||||
if err := l.fillbuf(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for j := range f.b[i] {
|
||||
p := &f.b[i][j]
|
||||
x := binary.LittleEndian.Uint32(l.buf[4*j:])
|
||||
|
||||
for {
|
||||
old := atomic.LoadUint32(p)
|
||||
if atomic.CompareAndSwapUint32(p, old, old|x) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (l *Loader) checkBitsAndHashes(nblocks, nhashes int) error {
|
||||
switch {
|
||||
case nblocks != int(l.nblocks):
|
||||
return fmt.Errorf("blobloom: Filter has %d blocks, but dump has %d", nblocks, l.nblocks)
|
||||
case nhashes != l.nhashes:
|
||||
return fmt.Errorf("blobloom: Filter has %d hashes, but dump has %d", nhashes, l.nhashes)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *Loader) fillbuf() error {
|
||||
_, err := io.ReadFull(l.r, l.buf[:])
|
||||
if err == io.EOF {
|
||||
err = io.ErrUnexpectedEOF
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// checkComment validates the comment field of a dump header. The comment
// runs up to the first zero byte; every byte after it must be zero padding.
// The comment with the padding stripped is returned.
func checkComment(p []byte) ([]byte, error) {
	eos := bytes.IndexByte(p, 0)
	if eos == -1 {
		return p, nil
	}
	for _, c := range p[eos+1:] {
		if c != 0 {
			return nil, fmt.Errorf("blobloom: comment block %q contains zero byte", p)
		}
	}
	return p[:eos], nil
}
|
201
vendor/github.com/greatroar/blobloom/optimize.go
generated
vendored
Normal file
201
vendor/github.com/greatroar/blobloom/optimize.go
generated
vendored
Normal file
|
@ -0,0 +1,201 @@
|
|||
// Copyright 2020 the Blobloom authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package blobloom
|
||||
|
||||
import "math"
|
||||
|
||||
// A Config holds parameters for Optimize or NewOptimized.
|
||||
type Config struct {
|
||||
// Trigger the "contains filtered or unexported fields" message for
|
||||
// forward compatibility and force the caller to use named fields.
|
||||
_ struct{}
|
||||
|
||||
// Capacity is the expected number of distinct keys to be added.
|
||||
// More keys can always be added, but the false positive rate can be
|
||||
// expected to drop below FPRate if their number exceeds the Capacity.
|
||||
Capacity uint64
|
||||
|
||||
// Desired lower bound on the false positive rate when the Bloom filter
|
||||
// has been filled to its capacity. FPRate must be between zero
|
||||
// (exclusive) and one (inclusive).
|
||||
FPRate float64
|
||||
|
||||
// Maximum size of the Bloom filter in bits. Zero means the global
|
||||
// MaxBits constant. A value less than BlockBits means BlockBits.
|
||||
MaxBits uint64
|
||||
}
|
||||
|
||||
// NewOptimized is shorthand for New(Optimize(config)).
|
||||
func NewOptimized(config Config) *Filter {
|
||||
return New(Optimize(config))
|
||||
}
|
||||
|
||||
// NewSyncOptimized is shorthand for New(Optimize(config)).
|
||||
func NewSyncOptimized(config Config) *SyncFilter {
|
||||
return NewSync(Optimize(config))
|
||||
}
|
||||
|
||||
// Optimize returns numbers of keys and hash functions that achieve the
|
||||
// desired false positive described by config.
|
||||
//
|
||||
// Optimize panics when config.FPRate is invalid.
|
||||
//
|
||||
// The estimated number of bits is imprecise for false positives rates below
|
||||
// ca. 1e-15.
|
||||
func Optimize(config Config) (nbits uint64, nhashes int) {
|
||||
n := float64(config.Capacity)
|
||||
p := config.FPRate
|
||||
|
||||
if p <= 0 || p > 1 {
|
||||
panic("false positive rate for a Bloom filter must be > 0, <= 1")
|
||||
}
|
||||
if n == 0 {
|
||||
// Assume the client wants to add at least one key; log2(0) = -inf.
|
||||
n = 1
|
||||
}
|
||||
|
||||
// The optimal nbits/n is c = -log2(p) / ln(2) for a vanilla Bloom filter.
|
||||
c := math.Ceil(-math.Log2(p) / math.Ln2)
|
||||
if c < float64(len(correctC)) {
|
||||
c = float64(correctC[int(c)])
|
||||
} else {
|
||||
// We can't achieve the desired FPR. Just triple the number of bits.
|
||||
c *= 3
|
||||
}
|
||||
nbits = uint64(c * n)
|
||||
|
||||
// Round up to a multiple of BlockBits.
|
||||
if nbits%BlockBits != 0 {
|
||||
nbits += BlockBits - nbits%BlockBits
|
||||
}
|
||||
|
||||
var maxbits uint64 = MaxBits
|
||||
if config.MaxBits != 0 && config.MaxBits < maxbits {
|
||||
maxbits = config.MaxBits
|
||||
if maxbits < BlockBits {
|
||||
maxbits = BlockBits
|
||||
}
|
||||
}
|
||||
if nbits > maxbits {
|
||||
nbits = maxbits
|
||||
// Round down to a multiple of BlockBits.
|
||||
nbits -= nbits % BlockBits
|
||||
}
|
||||
|
||||
// The corresponding optimal number of hash functions is k = c * log(2).
|
||||
// Try rounding up and down to see which rounding is better.
|
||||
c = float64(nbits) / n
|
||||
k := c * math.Ln2
|
||||
if k < 1 {
|
||||
nhashes = 1
|
||||
return nbits, nhashes
|
||||
}
|
||||
|
||||
ceilK, floorK := math.Floor(k), math.Ceil(k)
|
||||
if ceilK == floorK {
|
||||
return nbits, int(ceilK)
|
||||
}
|
||||
|
||||
fprCeil, _ := fpRate(c, math.Ceil(k))
|
||||
fprFloor, _ := fpRate(c, math.Floor(k))
|
||||
if fprFloor < fprCeil {
|
||||
k = floorK
|
||||
} else {
|
||||
k = ceilK
|
||||
}
|
||||
|
||||
return nbits, int(k)
|
||||
}
|
||||
|
||||
// correctC maps c = m/n for a vanilla Bloom filter to the c' for a
|
||||
// blocked Bloom filter.
|
||||
//
|
||||
// This is Putze et al.'s Table I, extended down to zero.
|
||||
// For c > 34, the values become huge and are hard to compute.
|
||||
var correctC = []byte{
|
||||
1, 1, 2, 4, 5,
|
||||
6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 20, 21, 23,
|
||||
25, 26, 28, 30, 32, 35, 38, 40, 44, 48, 51, 58, 64, 74, 90,
|
||||
}
|
||||
|
||||
// FPRate computes an estimate of the false positive rate of a Bloom filter
|
||||
// after nkeys distinct keys have been added.
|
||||
func FPRate(nkeys, nbits uint64, nhashes int) float64 {
|
||||
if nkeys == 0 {
|
||||
return 0
|
||||
}
|
||||
p, _ := fpRate(float64(nbits)/float64(nkeys), float64(nhashes))
|
||||
return p
|
||||
}
|
||||
|
||||
func fpRate(c, k float64) (p float64, iter int) {
|
||||
switch {
|
||||
case c == 0:
|
||||
panic("0 bits per key is too few")
|
||||
case k == 0:
|
||||
panic("0 hashes is too few")
|
||||
}
|
||||
|
||||
// Putze et al.'s Equation (3).
|
||||
//
|
||||
// The Poisson distribution has a single spike around its mean
|
||||
// BlockBits/c that gets slimmer and further away from zero as c tends
|
||||
// to zero (the Bloom filter gets more filled). We start at the mean,
|
||||
// then add terms left and right of it until their relative contribution
|
||||
// drops below ε.
|
||||
const ε = 1e-9
|
||||
mean := BlockBits / c
|
||||
|
||||
// Ceil to make sure we start at one, not zero.
|
||||
i := math.Ceil(mean)
|
||||
p = math.Exp(logPoisson(mean, i) + logFprBlock(BlockBits/i, k))
|
||||
|
||||
for j := i - 1; j > 0; j-- {
|
||||
add := math.Exp(logPoisson(mean, j) + logFprBlock(BlockBits/j, k))
|
||||
p += add
|
||||
iter++
|
||||
if add/p < ε {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
for j := i + 1; ; j++ {
|
||||
add := math.Exp(logPoisson(mean, j) + logFprBlock(BlockBits/j, k))
|
||||
p += add
|
||||
iter++
|
||||
if add/p < ε {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return p, iter
|
||||
}
|
||||
|
||||
// FPRate computes an estimate of f's false positive rate after nkeys distinct
|
||||
// keys have been added.
|
||||
func (f *Filter) FPRate(nkeys uint64) float64 {
|
||||
return FPRate(nkeys, f.NumBits(), f.k)
|
||||
}
|
||||
|
||||
// logFprBlock returns the log of the FPR of a single block,
// FPR = (1 - exp(-k/c))^k.
func logFprBlock(c, k float64) float64 {
	q := -math.Exp(-k / c)
	return k * math.Log1p(q)
}
|
||||
|
||||
// logPoisson returns the log of the Poisson pmf with mean λ evaluated at k,
// using Lgamma for the log-factorial.
func logPoisson(λ, k float64) float64 {
	logFact, _ := math.Lgamma(k + 1)
	return k*math.Log(λ) - λ - logFact
}
|
148
vendor/github.com/greatroar/blobloom/setop_64bit.go
generated
vendored
Normal file
148
vendor/github.com/greatroar/blobloom/setop_64bit.go
generated
vendored
Normal file
|
@ -0,0 +1,148 @@
|
|||
// Copyright 2020-2022 the Blobloom authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build (amd64 || arm64) && !nounsafe
|
||||
// +build amd64 arm64
|
||||
// +build !nounsafe
|
||||
|
||||
package blobloom
|
||||
|
||||
import (
|
||||
"math/bits"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Block reinterpreted as array of uint64.
|
||||
type block64 [BlockBits / 64]uint64
|
||||
|
||||
func (f *Filter) intersect(g *Filter) {
|
||||
a, b := f.b, g.b
|
||||
for len(a) >= 2 && len(b) >= 2 {
|
||||
p := (*block64)(unsafe.Pointer(&a[0]))
|
||||
q := (*block64)(unsafe.Pointer(&b[0]))
|
||||
|
||||
p[0] &= q[0]
|
||||
p[1] &= q[1]
|
||||
p[2] &= q[2]
|
||||
p[3] &= q[3]
|
||||
p[4] &= q[4]
|
||||
p[5] &= q[5]
|
||||
p[6] &= q[6]
|
||||
p[7] &= q[7]
|
||||
|
||||
p = (*block64)(unsafe.Pointer(&a[1]))
|
||||
q = (*block64)(unsafe.Pointer(&b[1]))
|
||||
|
||||
p[0] &= q[0]
|
||||
p[1] &= q[1]
|
||||
p[2] &= q[2]
|
||||
p[3] &= q[3]
|
||||
p[4] &= q[4]
|
||||
p[5] &= q[5]
|
||||
p[6] &= q[6]
|
||||
p[7] &= q[7]
|
||||
|
||||
a, b = a[2:], b[2:]
|
||||
}
|
||||
|
||||
if len(a) > 0 && len(b) > 0 {
|
||||
p := (*block64)(unsafe.Pointer(&a[0]))
|
||||
q := (*block64)(unsafe.Pointer(&b[0]))
|
||||
|
||||
p[0] &= q[0]
|
||||
p[1] &= q[1]
|
||||
p[2] &= q[2]
|
||||
p[3] &= q[3]
|
||||
p[4] &= q[4]
|
||||
p[5] &= q[5]
|
||||
p[6] &= q[6]
|
||||
p[7] &= q[7]
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Filter) union(g *Filter) {
|
||||
a, b := f.b, g.b
|
||||
for len(a) >= 2 && len(b) >= 2 {
|
||||
p := (*block64)(unsafe.Pointer(&a[0]))
|
||||
q := (*block64)(unsafe.Pointer(&b[0]))
|
||||
|
||||
p[0] |= q[0]
|
||||
p[1] |= q[1]
|
||||
p[2] |= q[2]
|
||||
p[3] |= q[3]
|
||||
p[4] |= q[4]
|
||||
p[5] |= q[5]
|
||||
p[6] |= q[6]
|
||||
p[7] |= q[7]
|
||||
|
||||
p = (*block64)(unsafe.Pointer(&a[1]))
|
||||
q = (*block64)(unsafe.Pointer(&b[1]))
|
||||
|
||||
p[0] |= q[0]
|
||||
p[1] |= q[1]
|
||||
p[2] |= q[2]
|
||||
p[3] |= q[3]
|
||||
p[4] |= q[4]
|
||||
p[5] |= q[5]
|
||||
p[6] |= q[6]
|
||||
p[7] |= q[7]
|
||||
|
||||
a, b = a[2:], b[2:]
|
||||
}
|
||||
|
||||
if len(a) > 0 && len(b) > 0 {
|
||||
p := (*block64)(unsafe.Pointer(&a[0]))
|
||||
q := (*block64)(unsafe.Pointer(&b[0]))
|
||||
|
||||
p[0] |= q[0]
|
||||
p[1] |= q[1]
|
||||
p[2] |= q[2]
|
||||
p[3] |= q[3]
|
||||
p[4] |= q[4]
|
||||
p[5] |= q[5]
|
||||
p[6] |= q[6]
|
||||
p[7] |= q[7]
|
||||
}
|
||||
}
|
||||
|
||||
func onescount(b *block) (n int) {
|
||||
p := (*block64)(unsafe.Pointer(&b[0]))
|
||||
|
||||
n += bits.OnesCount64(p[0])
|
||||
n += bits.OnesCount64(p[1])
|
||||
n += bits.OnesCount64(p[2])
|
||||
n += bits.OnesCount64(p[3])
|
||||
n += bits.OnesCount64(p[4])
|
||||
n += bits.OnesCount64(p[5])
|
||||
n += bits.OnesCount64(p[6])
|
||||
n += bits.OnesCount64(p[7])
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func onescountAtomic(b *block) (n int) {
|
||||
p := (*block64)(unsafe.Pointer(&b[0]))
|
||||
|
||||
n += bits.OnesCount64(atomic.LoadUint64(&p[0]))
|
||||
n += bits.OnesCount64(atomic.LoadUint64(&p[1]))
|
||||
n += bits.OnesCount64(atomic.LoadUint64(&p[2]))
|
||||
n += bits.OnesCount64(atomic.LoadUint64(&p[3]))
|
||||
n += bits.OnesCount64(atomic.LoadUint64(&p[4]))
|
||||
n += bits.OnesCount64(atomic.LoadUint64(&p[5]))
|
||||
n += bits.OnesCount64(atomic.LoadUint64(&p[6]))
|
||||
n += bits.OnesCount64(atomic.LoadUint64(&p[7]))
|
||||
|
||||
return n
|
||||
}
|
115
vendor/github.com/greatroar/blobloom/setop_other.go
generated
vendored
Normal file
115
vendor/github.com/greatroar/blobloom/setop_other.go
generated
vendored
Normal file
|
@ -0,0 +1,115 @@
|
|||
// Copyright 2020-2022 the Blobloom authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//go:build (!amd64 && !arm64) || nounsafe
|
||||
// +build !amd64,!arm64 nounsafe
|
||||
|
||||
package blobloom
|
||||
|
||||
import (
|
||||
"math/bits"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
func (f *Filter) intersect(g *Filter) {
|
||||
for i := range f.b {
|
||||
f.b[i].intersect(&g.b[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Filter) union(g *Filter) {
|
||||
for i := range f.b {
|
||||
f.b[i].union(&g.b[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (b *block) intersect(c *block) {
|
||||
b[0] &= c[0]
|
||||
b[1] &= c[1]
|
||||
b[2] &= c[2]
|
||||
b[3] &= c[3]
|
||||
b[4] &= c[4]
|
||||
b[5] &= c[5]
|
||||
b[6] &= c[6]
|
||||
b[7] &= c[7]
|
||||
b[8] &= c[8]
|
||||
b[9] &= c[9]
|
||||
b[10] &= c[10]
|
||||
b[11] &= c[11]
|
||||
b[12] &= c[12]
|
||||
b[13] &= c[13]
|
||||
b[14] &= c[14]
|
||||
b[15] &= c[15]
|
||||
}
|
||||
|
||||
func (b *block) union(c *block) {
|
||||
b[0] |= c[0]
|
||||
b[1] |= c[1]
|
||||
b[2] |= c[2]
|
||||
b[3] |= c[3]
|
||||
b[4] |= c[4]
|
||||
b[5] |= c[5]
|
||||
b[6] |= c[6]
|
||||
b[7] |= c[7]
|
||||
b[8] |= c[8]
|
||||
b[9] |= c[9]
|
||||
b[10] |= c[10]
|
||||
b[11] |= c[11]
|
||||
b[12] |= c[12]
|
||||
b[13] |= c[13]
|
||||
b[14] |= c[14]
|
||||
b[15] |= c[15]
|
||||
}
|
||||
|
||||
func onescount(b *block) (n int) {
|
||||
n += bits.OnesCount32(b[0])
|
||||
n += bits.OnesCount32(b[1])
|
||||
n += bits.OnesCount32(b[2])
|
||||
n += bits.OnesCount32(b[3])
|
||||
n += bits.OnesCount32(b[4])
|
||||
n += bits.OnesCount32(b[5])
|
||||
n += bits.OnesCount32(b[6])
|
||||
n += bits.OnesCount32(b[7])
|
||||
n += bits.OnesCount32(b[8])
|
||||
n += bits.OnesCount32(b[9])
|
||||
n += bits.OnesCount32(b[10])
|
||||
n += bits.OnesCount32(b[11])
|
||||
n += bits.OnesCount32(b[12])
|
||||
n += bits.OnesCount32(b[13])
|
||||
n += bits.OnesCount32(b[14])
|
||||
n += bits.OnesCount32(b[15])
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func onescountAtomic(b *block) (n int) {
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[0]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[1]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[2]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[3]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[4]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[5]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[6]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[7]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[8]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[9]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[10]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[11]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[12]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[13]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[14]))
|
||||
n += bits.OnesCount32(atomic.LoadUint32(&b[15]))
|
||||
|
||||
return n
|
||||
}
|
145
vendor/github.com/greatroar/blobloom/sync.go
generated
vendored
Normal file
145
vendor/github.com/greatroar/blobloom/sync.go
generated
vendored
Normal file
|
@ -0,0 +1,145 @@
|
|||
// Copyright 2021-2022 the Blobloom authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package blobloom
|
||||
|
||||
import "sync/atomic"
|
||||
|
||||
// A SyncFilter is a Bloom filter that can be accessed and updated
|
||||
// by multiple goroutines concurrently.
|
||||
//
|
||||
// A SyncFilter mostly behaves as a regular filter protected by a lock,
|
||||
//
|
||||
// type SyncFilter struct {
|
||||
// Filter
|
||||
// lock sync.Mutex
|
||||
// }
|
||||
//
|
||||
// with each method taking and releasing the lock,
|
||||
// but is implemented much more efficiently.
|
||||
// See the method descriptions for exceptions to the previous rule.
|
||||
type SyncFilter struct {
|
||||
b []block // Shards.
|
||||
k int // Number of hash functions required.
|
||||
}
|
||||
|
||||
// NewSync constructs a Bloom filter with given numbers of bits and hash functions.
|
||||
//
|
||||
// The number of bits should be at least BlockBits; smaller values are silently
|
||||
// increased.
|
||||
//
|
||||
// The number of hashes reflects the number of hashes synthesized from the
|
||||
// single hash passed in by the client. It is silently increased to two if
|
||||
// a lower value is given.
|
||||
func NewSync(nbits uint64, nhashes int) *SyncFilter {
|
||||
nbits, nhashes = fixBitsAndHashes(nbits, nhashes)
|
||||
|
||||
return &SyncFilter{
|
||||
b: make([]block, nbits/BlockBits),
|
||||
k: nhashes,
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Add insert a key with hash value h into f.
|
||||
func (f *SyncFilter) Add(h uint64) {
|
||||
h1, h2 := uint32(h>>32), uint32(h)
|
||||
b := getblock(f.b, h2)
|
||||
|
||||
for i := 1; i < f.k; i++ {
|
||||
h1, h2 = doublehash(h1, h2, i)
|
||||
setbitAtomic(b, h1)
|
||||
}
|
||||
}
|
||||
|
||||
// Cardinality estimates the number of distinct keys added to f.
|
||||
//
|
||||
// The estimate is most reliable when f is filled to roughly its capacity.
|
||||
// It gets worse as f gets more densely filled. When one of the blocks is
|
||||
// entirely filled, the estimate becomes +Inf.
|
||||
//
|
||||
// The return value is the maximum likelihood estimate of Papapetrou, Siberski
|
||||
// and Nejdl, summed over the blocks
|
||||
// (https://www.win.tue.nl/~opapapetrou/papers/Bloomfilters-DAPD.pdf).
|
||||
//
|
||||
// If other goroutines are concurrently adding keys,
|
||||
// the estimate may lie in between what would have been returned
|
||||
// before the concurrent updates started and what is returned
|
||||
// after the updates complete.
|
||||
func (f *SyncFilter) Cardinality() float64 {
|
||||
return cardinality(f.k, f.b, onescountAtomic)
|
||||
}
|
||||
|
||||
// Empty reports whether f contains no keys.
|
||||
//
|
||||
// If other goroutines are concurrently adding keys,
|
||||
// Empty may return a false positive.
|
||||
func (f *SyncFilter) Empty() bool {
|
||||
for i := 0; i < len(f.b); i++ {
|
||||
for j := 0; j < blockWords; j++ {
|
||||
if atomic.LoadUint32(&f.b[i][j]) != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Fill sets f to a completely full filter.
|
||||
// After Fill, Has returns true for any key.
|
||||
func (f *SyncFilter) Fill() {
|
||||
for i := 0; i < len(f.b); i++ {
|
||||
for j := 0; j < blockWords; j++ {
|
||||
atomic.StoreUint32(&f.b[i][j], ^uint32(0))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Has reports whether a key with hash value h has been added.
|
||||
// It may return a false positive.
|
||||
func (f *SyncFilter) Has(h uint64) bool {
|
||||
h1, h2 := uint32(h>>32), uint32(h)
|
||||
b := getblock(f.b, h2)
|
||||
|
||||
for i := 1; i < f.k; i++ {
|
||||
h1, h2 = doublehash(h1, h2, i)
|
||||
if !getbitAtomic(b, h1) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// getbitAtomic reports whether bit (i modulo BlockBits) is set.
|
||||
func getbitAtomic(b *block, i uint32) bool {
|
||||
bit := uint32(1) << (i % wordSize)
|
||||
x := atomic.LoadUint32(&(*b)[(i/wordSize)%blockWords])
|
||||
return x&bit != 0
|
||||
}
|
||||
|
||||
// setbit sets bit (i modulo BlockBits) of b, atomically.
|
||||
func setbitAtomic(b *block, i uint32) {
|
||||
bit := uint32(1) << (i % wordSize)
|
||||
p := &(*b)[(i/wordSize)%blockWords]
|
||||
|
||||
for {
|
||||
old := atomic.LoadUint32(p)
|
||||
if old&bit != 0 {
|
||||
// Checking here instead of checking the return value from
|
||||
// the CAS is between 50% and 80% faster on the benchmark.
|
||||
return
|
||||
}
|
||||
atomic.CompareAndSwapUint32(p, old, old|bit)
|
||||
}
|
||||
}
|
16
vendor/github.com/greatroar/blobloom/test.sh
generated
vendored
Normal file
16
vendor/github.com/greatroar/blobloom/test.sh
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
#!/bin/sh
|
||||
|
||||
set -e -x
|
||||
|
||||
golangci-lint run . examples/*
|
||||
|
||||
go test
|
||||
|
||||
if [ "$(go env GOARCH)" = amd64 ]; then
|
||||
go test -tags nounsafe
|
||||
GOARCH=386 go test
|
||||
fi
|
||||
|
||||
for e in examples/*; do
|
||||
(cd $e && go build && rm $(basename $e))
|
||||
done
|
4
vendor/github.com/nbd-wtf/go-nostr/envelopes.go
generated
vendored
4
vendor/github.com/nbd-wtf/go-nostr/envelopes.go
generated
vendored
|
@ -159,10 +159,6 @@ func (v *CountEnvelope) UnmarshalJSON(data []byte) error {
|
|||
}
|
||||
v.SubscriptionID = arr[1].Str
|
||||
|
||||
if len(arr) < 3 {
|
||||
return fmt.Errorf("COUNT array must have at least 3 items")
|
||||
}
|
||||
|
||||
var countResult struct {
|
||||
Count *int64 `json:"count"`
|
||||
}
|
||||
|
|
16
vendor/github.com/nbd-wtf/go-nostr/event_extra.go
generated
vendored
16
vendor/github.com/nbd-wtf/go-nostr/event_extra.go
generated
vendored
|
@ -1,6 +1,6 @@
|
|||
package nostr
|
||||
|
||||
// SetExtra sets an out-of-the-spec value under the given key into the event object.
|
||||
// Deprecated: this was never a good idea, stop using.
|
||||
func (evt *Event) SetExtra(key string, value any) {
|
||||
if evt.extra == nil {
|
||||
evt.extra = make(map[string]any)
|
||||
|
@ -8,7 +8,7 @@ func (evt *Event) SetExtra(key string, value any) {
|
|||
evt.extra[key] = value
|
||||
}
|
||||
|
||||
// RemoveExtra removes an out-of-the-spec value under the given key from the event object.
|
||||
// Deprecated: this was never a good idea, stop using.
|
||||
func (evt *Event) RemoveExtra(key string) {
|
||||
if evt.extra == nil {
|
||||
return
|
||||
|
@ -16,15 +16,13 @@ func (evt *Event) RemoveExtra(key string) {
|
|||
delete(evt.extra, key)
|
||||
}
|
||||
|
||||
// GetExtra tries to get a value under the given key that may be present in the event object
|
||||
// but is hidden in the basic type since it is out of the spec.
|
||||
// Deprecated: this was never a good idea, stop using.
|
||||
func (evt Event) GetExtra(key string) any {
|
||||
ival, _ := evt.extra[key]
|
||||
return ival
|
||||
}
|
||||
|
||||
// GetExtraString is like [Event.GetExtra], but only works if the value is a string,
|
||||
// otherwise returns the zero-value.
|
||||
// Deprecated: this was never a good idea, stop using.
|
||||
func (evt Event) GetExtraString(key string) string {
|
||||
ival, ok := evt.extra[key]
|
||||
if !ok {
|
||||
|
@ -37,8 +35,7 @@ func (evt Event) GetExtraString(key string) string {
|
|||
return val
|
||||
}
|
||||
|
||||
// GetExtraNumber is like [Event.GetExtra], but only works if the value is a float64,
|
||||
// otherwise returns the zero-value.
|
||||
// Deprecated: this was never a good idea, stop using.
|
||||
func (evt Event) GetExtraNumber(key string) float64 {
|
||||
ival, ok := evt.extra[key]
|
||||
if !ok {
|
||||
|
@ -57,8 +54,7 @@ func (evt Event) GetExtraNumber(key string) float64 {
|
|||
return 0
|
||||
}
|
||||
|
||||
// GetExtraBoolean is like [Event.GetExtra], but only works if the value is a boolean,
|
||||
// otherwise returns the zero-value.
|
||||
// Deprecated: this was never a good idea, stop using.
|
||||
func (evt Event) GetExtraBoolean(key string) bool {
|
||||
ival, ok := evt.extra[key]
|
||||
if !ok {
|
||||
|
|
10
vendor/github.com/nbd-wtf/go-nostr/helpers.go
generated
vendored
10
vendor/github.com/nbd-wtf/go-nostr/helpers.go
generated
vendored
|
@ -103,3 +103,13 @@ func subIdToSerial(subId string) int64 {
|
|||
serialId, _ := strconv.ParseInt(subId[0:n], 10, 64)
|
||||
return serialId
|
||||
}
|
||||
|
||||
func isLowerHex(thing string) bool {
|
||||
for _, charNumber := range thing {
|
||||
if (charNumber >= 48 && charNumber <= 57) || (charNumber >= 97 && charNumber <= 102) {
|
||||
continue
|
||||
}
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
|
20
vendor/github.com/nbd-wtf/go-nostr/keyer.go
generated
vendored
Normal file
20
vendor/github.com/nbd-wtf/go-nostr/keyer.go
generated
vendored
Normal file
|
@ -0,0 +1,20 @@
|
|||
package nostr
|
||||
|
||||
import "context"
|
||||
|
||||
type Keyer interface {
|
||||
Signer
|
||||
Cipher
|
||||
}
|
||||
|
||||
// A Signer provides basic public key signing methods.
|
||||
type Signer interface {
|
||||
GetPublicKey(context.Context) (string, error)
|
||||
SignEvent(context.Context, *Event) error
|
||||
}
|
||||
|
||||
// A Cipher provides NIP-44 encryption and decryption methods.
|
||||
type Cipher interface {
|
||||
Encrypt(ctx context.Context, plaintext string, recipientPublicKey string) (base64ciphertext string, err error)
|
||||
Decrypt(ctx context.Context, base64ciphertext string, senderPublicKey string) (plaintext string, err error)
|
||||
}
|
10
vendor/github.com/nbd-wtf/go-nostr/keys.go
generated
vendored
10
vendor/github.com/nbd-wtf/go-nostr/keys.go
generated
vendored
|
@ -6,7 +6,6 @@ import (
|
|||
"fmt"
|
||||
"io"
|
||||
"math/big"
|
||||
"strings"
|
||||
|
||||
"github.com/btcsuite/btcd/btcec/v2"
|
||||
"github.com/btcsuite/btcd/btcec/v2/schnorr"
|
||||
|
@ -39,16 +38,11 @@ func GetPublicKey(sk string) (string, error) {
|
|||
return hex.EncodeToString(schnorr.SerializePubKey(pk)), nil
|
||||
}
|
||||
|
||||
// Deprecated: use IsValid32ByteHex instead -- functionality unchanged.
|
||||
func IsValidPublicKeyHex(pk string) bool {
|
||||
if strings.ToLower(pk) != pk {
|
||||
func IsValidPublicKey(pk string) bool {
|
||||
if !isLowerHex(pk) {
|
||||
return false
|
||||
}
|
||||
dec, _ := hex.DecodeString(pk)
|
||||
return len(dec) == 32
|
||||
}
|
||||
|
||||
func IsValidPublicKey(pk string) bool {
|
||||
v, _ := hex.DecodeString(pk)
|
||||
_, err := schnorr.ParsePubKey(v)
|
||||
return err == nil
|
||||
|
|
181
vendor/github.com/nbd-wtf/go-nostr/nip77/envelopes.go
generated
vendored
Normal file
181
vendor/github.com/nbd-wtf/go-nostr/nip77/envelopes.go
generated
vendored
Normal file
|
@ -0,0 +1,181 @@
|
|||
package nip77
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/mailru/easyjson"
|
||||
jwriter "github.com/mailru/easyjson/jwriter"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/tidwall/gjson"
|
||||
)
|
||||
|
||||
func ParseNegMessage(message []byte) nostr.Envelope {
|
||||
firstComma := bytes.Index(message, []byte{','})
|
||||
if firstComma == -1 {
|
||||
return nil
|
||||
}
|
||||
label := message[0:firstComma]
|
||||
|
||||
var v nostr.Envelope
|
||||
switch {
|
||||
case bytes.Contains(label, []byte("NEG-MSG")):
|
||||
v = &MessageEnvelope{}
|
||||
case bytes.Contains(label, []byte("NEG-OPEN")):
|
||||
v = &OpenEnvelope{}
|
||||
case bytes.Contains(label, []byte("NEG-ERR")):
|
||||
v = &ErrorEnvelope{}
|
||||
case bytes.Contains(label, []byte("NEG-CLOSE")):
|
||||
v = &CloseEnvelope{}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := v.UnmarshalJSON(message); err != nil {
|
||||
return nil
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
var (
|
||||
_ nostr.Envelope = (*OpenEnvelope)(nil)
|
||||
_ nostr.Envelope = (*MessageEnvelope)(nil)
|
||||
_ nostr.Envelope = (*CloseEnvelope)(nil)
|
||||
_ nostr.Envelope = (*ErrorEnvelope)(nil)
|
||||
)
|
||||
|
||||
type OpenEnvelope struct {
|
||||
SubscriptionID string
|
||||
Filter nostr.Filter
|
||||
Message string
|
||||
}
|
||||
|
||||
func (_ OpenEnvelope) Label() string { return "NEG-OPEN" }
|
||||
func (v OpenEnvelope) String() string {
|
||||
b, _ := v.MarshalJSON()
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (v *OpenEnvelope) UnmarshalJSON(data []byte) error {
|
||||
r := gjson.ParseBytes(data)
|
||||
arr := r.Array()
|
||||
if len(arr) != 4 {
|
||||
return fmt.Errorf("failed to decode NEG-OPEN envelope")
|
||||
}
|
||||
|
||||
v.SubscriptionID = arr[1].Str
|
||||
v.Message = arr[3].Str
|
||||
return easyjson.Unmarshal([]byte(arr[2].Raw), &v.Filter)
|
||||
}
|
||||
|
||||
func (v OpenEnvelope) MarshalJSON() ([]byte, error) {
|
||||
res := bytes.NewBuffer(make([]byte, 0, 17+len(v.SubscriptionID)+len(v.Message)+500))
|
||||
|
||||
res.WriteString(`["NEG-OPEN","`)
|
||||
res.WriteString(v.SubscriptionID)
|
||||
res.WriteString(`",`)
|
||||
|
||||
w := jwriter.Writer{}
|
||||
v.Filter.MarshalEasyJSON(&w)
|
||||
w.Buffer.DumpTo(res)
|
||||
|
||||
res.WriteString(`,"`)
|
||||
res.WriteString(v.Message)
|
||||
res.WriteString(`"]`)
|
||||
|
||||
return res.Bytes(), nil
|
||||
}
|
||||
|
||||
type MessageEnvelope struct {
|
||||
SubscriptionID string
|
||||
Message string
|
||||
}
|
||||
|
||||
func (_ MessageEnvelope) Label() string { return "NEG-MSG" }
|
||||
func (v MessageEnvelope) String() string {
|
||||
b, _ := v.MarshalJSON()
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (v *MessageEnvelope) UnmarshalJSON(data []byte) error {
|
||||
r := gjson.ParseBytes(data)
|
||||
arr := r.Array()
|
||||
if len(arr) < 3 {
|
||||
return fmt.Errorf("failed to decode NEG-MSG envelope")
|
||||
}
|
||||
v.SubscriptionID = arr[1].Str
|
||||
v.Message = arr[2].Str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v MessageEnvelope) MarshalJSON() ([]byte, error) {
|
||||
res := bytes.NewBuffer(make([]byte, 0, 17+len(v.SubscriptionID)+len(v.Message)))
|
||||
|
||||
res.WriteString(`["NEG-MSG","`)
|
||||
res.WriteString(v.SubscriptionID)
|
||||
res.WriteString(`","`)
|
||||
res.WriteString(v.Message)
|
||||
res.WriteString(`"]`)
|
||||
|
||||
return res.Bytes(), nil
|
||||
}
|
||||
|
||||
type CloseEnvelope struct {
|
||||
SubscriptionID string
|
||||
}
|
||||
|
||||
func (_ CloseEnvelope) Label() string { return "NEG-CLOSE" }
|
||||
func (v CloseEnvelope) String() string {
|
||||
b, _ := v.MarshalJSON()
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (v *CloseEnvelope) UnmarshalJSON(data []byte) error {
|
||||
r := gjson.ParseBytes(data)
|
||||
arr := r.Array()
|
||||
if len(arr) < 2 {
|
||||
return fmt.Errorf("failed to decode NEG-CLOSE envelope")
|
||||
}
|
||||
v.SubscriptionID = arr[1].Str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v CloseEnvelope) MarshalJSON() ([]byte, error) {
|
||||
res := bytes.NewBuffer(make([]byte, 0, 14+len(v.SubscriptionID)))
|
||||
res.WriteString(`["NEG-CLOSE","`)
|
||||
res.WriteString(v.SubscriptionID)
|
||||
res.WriteString(`"]`)
|
||||
return res.Bytes(), nil
|
||||
}
|
||||
|
||||
type ErrorEnvelope struct {
|
||||
SubscriptionID string
|
||||
Reason string
|
||||
}
|
||||
|
||||
func (_ ErrorEnvelope) Label() string { return "NEG-ERROR" }
|
||||
func (v ErrorEnvelope) String() string {
|
||||
b, _ := v.MarshalJSON()
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func (v *ErrorEnvelope) UnmarshalJSON(data []byte) error {
|
||||
r := gjson.ParseBytes(data)
|
||||
arr := r.Array()
|
||||
if len(arr) < 3 {
|
||||
return fmt.Errorf("failed to decode NEG-ERROR envelope")
|
||||
}
|
||||
v.SubscriptionID = arr[1].Str
|
||||
v.Reason = arr[2].Str
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v ErrorEnvelope) MarshalJSON() ([]byte, error) {
|
||||
res := bytes.NewBuffer(make([]byte, 0, 19+len(v.SubscriptionID)+len(v.Reason)))
|
||||
res.WriteString(`["NEG-ERROR","`)
|
||||
res.WriteString(v.SubscriptionID)
|
||||
res.WriteString(`","`)
|
||||
res.WriteString(v.Reason)
|
||||
res.WriteString(`"]`)
|
||||
return res.Bytes(), nil
|
||||
}
|
41
vendor/github.com/nbd-wtf/go-nostr/nip77/idlistpool.go
generated
vendored
Normal file
41
vendor/github.com/nbd-wtf/go-nostr/nip77/idlistpool.go
generated
vendored
Normal file
|
@ -0,0 +1,41 @@
|
|||
package nip77
|
||||
|
||||
import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
type idlistpool struct {
|
||||
initialsize int
|
||||
pool [][]string
|
||||
sync.Mutex
|
||||
}
|
||||
|
||||
func newidlistpool(initialsize int) *idlistpool {
|
||||
ilp := idlistpool{
|
||||
initialsize: initialsize,
|
||||
pool: make([][]string, 1, 2),
|
||||
}
|
||||
|
||||
ilp.pool[0] = make([]string, 0, initialsize)
|
||||
|
||||
return &ilp
|
||||
}
|
||||
|
||||
func (ilp *idlistpool) grab() []string {
|
||||
ilp.Lock()
|
||||
defer ilp.Unlock()
|
||||
|
||||
l := len(ilp.pool)
|
||||
if l > 0 {
|
||||
idlist := ilp.pool[l-1]
|
||||
ilp.pool = ilp.pool[0 : l-1]
|
||||
return idlist
|
||||
}
|
||||
idlist := make([]string, 0, ilp.initialsize)
|
||||
return idlist
|
||||
}
|
||||
|
||||
func (ilp *idlistpool) giveback(idlist []string) {
|
||||
idlist = idlist[:0]
|
||||
ilp.pool = append(ilp.pool, idlist)
|
||||
}
|
1
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/README
generated
vendored
Normal file
1
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/README
generated
vendored
Normal file
|
@ -0,0 +1 @@
|
|||
this is a fork of https://github.com/illuzen/go-negentropy
|
143
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/encoding.go
generated
vendored
Normal file
143
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/encoding.go
generated
vendored
Normal file
|
@ -0,0 +1,143 @@
|
|||
package negentropy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
func (n *Negentropy) readTimestamp(reader *StringHexReader) (nostr.Timestamp, error) {
|
||||
delta, err := readVarInt(reader)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if delta == 0 {
|
||||
// zeroes are infinite
|
||||
timestamp := maxTimestamp
|
||||
n.lastTimestampIn = timestamp
|
||||
return timestamp, nil
|
||||
}
|
||||
|
||||
// remove 1 as we always add 1 when encoding
|
||||
delta--
|
||||
|
||||
// we add the previously cached timestamp to get the current
|
||||
timestamp := n.lastTimestampIn + nostr.Timestamp(delta)
|
||||
|
||||
// cache this so we can apply it to the delta next time
|
||||
n.lastTimestampIn = timestamp
|
||||
|
||||
return timestamp, nil
|
||||
}
|
||||
|
||||
func (n *Negentropy) readBound(reader *StringHexReader) (Bound, error) {
|
||||
timestamp, err := n.readTimestamp(reader)
|
||||
if err != nil {
|
||||
return Bound{}, fmt.Errorf("failed to decode bound timestamp: %w", err)
|
||||
}
|
||||
|
||||
length, err := readVarInt(reader)
|
||||
if err != nil {
|
||||
return Bound{}, fmt.Errorf("failed to decode bound length: %w", err)
|
||||
}
|
||||
|
||||
id, err := reader.ReadString(length * 2)
|
||||
if err != nil {
|
||||
return Bound{}, fmt.Errorf("failed to read bound id: %w", err)
|
||||
}
|
||||
|
||||
return Bound{Item{timestamp, id}}, nil
|
||||
}
|
||||
|
||||
func (n *Negentropy) writeTimestamp(w *StringHexWriter, timestamp nostr.Timestamp) {
|
||||
if timestamp == maxTimestamp {
|
||||
// zeroes are infinite
|
||||
n.lastTimestampOut = maxTimestamp // cache this (see below)
|
||||
writeVarInt(w, 0)
|
||||
return
|
||||
}
|
||||
|
||||
// we will only encode the difference between this timestamp and the previous
|
||||
delta := timestamp - n.lastTimestampOut
|
||||
|
||||
// we cache this here as the next timestamp we encode will be just a delta from this
|
||||
n.lastTimestampOut = timestamp
|
||||
|
||||
// add 1 to prevent zeroes from being read as infinites
|
||||
writeVarInt(w, int(delta+1))
|
||||
return
|
||||
}
|
||||
|
||||
func (n *Negentropy) writeBound(w *StringHexWriter, bound Bound) {
|
||||
n.writeTimestamp(w, bound.Timestamp)
|
||||
writeVarInt(w, len(bound.ID)/2)
|
||||
w.WriteHex(bound.Item.ID)
|
||||
}
|
||||
|
||||
func getMinimalBound(prev, curr Item) Bound {
|
||||
if curr.Timestamp != prev.Timestamp {
|
||||
return Bound{Item{curr.Timestamp, ""}}
|
||||
}
|
||||
|
||||
sharedPrefixBytes := 0
|
||||
|
||||
for i := 0; i < 32; i += 2 {
|
||||
if curr.ID[i:i+2] != prev.ID[i:i+2] {
|
||||
break
|
||||
}
|
||||
sharedPrefixBytes++
|
||||
}
|
||||
|
||||
// sharedPrefixBytes + 1 to include the first differing byte, or the entire ID if identical.
|
||||
return Bound{Item{curr.Timestamp, curr.ID[:(sharedPrefixBytes+1)*2]}}
|
||||
}
|
||||
|
||||
func readVarInt(reader *StringHexReader) (int, error) {
|
||||
var res int = 0
|
||||
|
||||
for {
|
||||
b, err := reader.ReadHexByte()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
res = (res << 7) | (int(b) & 127)
|
||||
if (b & 128) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func writeVarInt(w *StringHexWriter, n int) {
|
||||
if n == 0 {
|
||||
w.WriteByte(0)
|
||||
return
|
||||
}
|
||||
|
||||
w.WriteBytes(EncodeVarInt(n))
|
||||
}
|
||||
|
||||
func EncodeVarInt(n int) []byte {
|
||||
if n == 0 {
|
||||
return []byte{0}
|
||||
}
|
||||
|
||||
result := make([]byte, 8)
|
||||
idx := 7
|
||||
|
||||
for n != 0 {
|
||||
result[idx] = byte(n & 0x7F)
|
||||
n >>= 7
|
||||
idx--
|
||||
}
|
||||
|
||||
result = result[idx+1:]
|
||||
for i := 0; i < len(result)-1; i++ {
|
||||
result[i] |= 0x80
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
93
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/hex.go
generated
vendored
Normal file
93
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/hex.go
generated
vendored
Normal file
|
@ -0,0 +1,93 @@
|
|||
package negentropy
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"io"
|
||||
)
|
||||
|
||||
func NewStringHexReader(source string) *StringHexReader {
|
||||
return &StringHexReader{source, 0, make([]byte, 1)}
|
||||
}
|
||||
|
||||
type StringHexReader struct {
|
||||
source string
|
||||
idx int
|
||||
|
||||
tmp []byte
|
||||
}
|
||||
|
||||
func (r *StringHexReader) Len() int {
|
||||
return len(r.source) - r.idx
|
||||
}
|
||||
|
||||
func (r *StringHexReader) ReadHexBytes(buf []byte) error {
|
||||
n := len(buf) * 2
|
||||
r.idx += n
|
||||
if len(r.source) < r.idx {
|
||||
return io.EOF
|
||||
}
|
||||
_, err := hex.Decode(buf, []byte(r.source[r.idx-n:r.idx]))
|
||||
return err
|
||||
}
|
||||
|
||||
func (r *StringHexReader) ReadHexByte() (byte, error) {
|
||||
err := r.ReadHexBytes(r.tmp)
|
||||
return r.tmp[0], err
|
||||
}
|
||||
|
||||
func (r *StringHexReader) ReadString(size int) (string, error) {
|
||||
r.idx += size
|
||||
if len(r.source) < r.idx {
|
||||
return "", io.EOF
|
||||
}
|
||||
return r.source[r.idx-size : r.idx], nil
|
||||
}
|
||||
|
||||
func NewStringHexWriter(buf []byte) *StringHexWriter {
|
||||
return &StringHexWriter{buf, make([]byte, 2)}
|
||||
}
|
||||
|
||||
type StringHexWriter struct {
|
||||
hexbuf []byte
|
||||
|
||||
tmp []byte
|
||||
}
|
||||
|
||||
func (r *StringHexWriter) Len() int {
|
||||
return len(r.hexbuf)
|
||||
}
|
||||
|
||||
func (r *StringHexWriter) Hex() string {
|
||||
return string(r.hexbuf)
|
||||
}
|
||||
|
||||
func (r *StringHexWriter) Reset() {
|
||||
r.hexbuf = r.hexbuf[:0]
|
||||
}
|
||||
|
||||
func (r *StringHexWriter) WriteHex(hexString string) {
|
||||
r.hexbuf = append(r.hexbuf, hexString...)
|
||||
return
|
||||
}
|
||||
|
||||
func (r *StringHexWriter) WriteByte(b byte) error {
|
||||
hex.Encode(r.tmp, []byte{b})
|
||||
r.hexbuf = append(r.hexbuf, r.tmp...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *StringHexWriter) WriteBytes(in []byte) {
|
||||
r.hexbuf = hex.AppendEncode(r.hexbuf, in)
|
||||
|
||||
// curr := len(r.hexbuf)
|
||||
// next := curr + len(in)*2
|
||||
// for cap(r.hexbuf) < next {
|
||||
// r.hexbuf = append(r.hexbuf, in...)
|
||||
// }
|
||||
// r.hexbuf = r.hexbuf[0:next]
|
||||
// dst := r.hexbuf[curr:next]
|
||||
|
||||
// hex.Encode(dst, in)
|
||||
|
||||
return
|
||||
}
|
300
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/negentropy.go
generated
vendored
Normal file
300
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/negentropy.go
generated
vendored
Normal file
|
@ -0,0 +1,300 @@
|
|||
package negentropy
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
const (
|
||||
protocolVersion byte = 0x61 // version 1
|
||||
maxTimestamp = nostr.Timestamp(math.MaxInt64)
|
||||
buckets = 16
|
||||
)
|
||||
|
||||
var InfiniteBound = Bound{Item: Item{Timestamp: maxTimestamp}}
|
||||
|
||||
type Negentropy struct {
|
||||
storage Storage
|
||||
initialized bool
|
||||
frameSizeLimit int
|
||||
isClient bool
|
||||
lastTimestampIn nostr.Timestamp
|
||||
lastTimestampOut nostr.Timestamp
|
||||
|
||||
Haves chan string
|
||||
HaveNots chan string
|
||||
}
|
||||
|
||||
func New(storage Storage, frameSizeLimit int) *Negentropy {
|
||||
if frameSizeLimit == 0 {
|
||||
frameSizeLimit = math.MaxInt
|
||||
} else if frameSizeLimit < 4096 {
|
||||
panic(fmt.Errorf("frameSizeLimit can't be smaller than 4096, was %d", frameSizeLimit))
|
||||
}
|
||||
|
||||
return &Negentropy{
|
||||
storage: storage,
|
||||
frameSizeLimit: frameSizeLimit,
|
||||
Haves: make(chan string, buckets*4),
|
||||
HaveNots: make(chan string, buckets*4),
|
||||
}
|
||||
}
|
||||
|
||||
func (n *Negentropy) String() string {
|
||||
label := "uninitialized"
|
||||
if n.initialized {
|
||||
label = "server"
|
||||
if n.isClient {
|
||||
label = "client"
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("<Negentropy %s with %d items>", label, n.storage.Size())
|
||||
}
|
||||
|
||||
func (n *Negentropy) Start() string {
|
||||
n.initialized = true
|
||||
n.isClient = true
|
||||
|
||||
output := NewStringHexWriter(make([]byte, 0, 1+n.storage.Size()*64))
|
||||
output.WriteByte(protocolVersion)
|
||||
n.SplitRange(0, n.storage.Size(), InfiniteBound, output)
|
||||
|
||||
return output.Hex()
|
||||
}
|
||||
|
||||
func (n *Negentropy) Reconcile(msg string) (output string, err error) {
|
||||
n.initialized = true
|
||||
reader := NewStringHexReader(msg)
|
||||
|
||||
output, err = n.reconcileAux(reader)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if len(output) == 2 && n.isClient {
|
||||
close(n.Haves)
|
||||
close(n.HaveNots)
|
||||
return "", nil
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func (n *Negentropy) reconcileAux(reader *StringHexReader) (string, error) {
|
||||
n.lastTimestampIn, n.lastTimestampOut = 0, 0 // reset for each message
|
||||
|
||||
fullOutput := NewStringHexWriter(make([]byte, 0, 5000))
|
||||
fullOutput.WriteByte(protocolVersion)
|
||||
|
||||
pv, err := reader.ReadHexByte()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read pv: %w", err)
|
||||
}
|
||||
if pv != protocolVersion {
|
||||
if n.isClient {
|
||||
return "", fmt.Errorf("unsupported negentropy protocol version %v", pv)
|
||||
}
|
||||
|
||||
// if we're a server we just return our protocol version
|
||||
return fullOutput.Hex(), nil
|
||||
}
|
||||
|
||||
var prevBound Bound
|
||||
prevIndex := 0
|
||||
skipping := false // this means we are currently coalescing ranges into skip
|
||||
|
||||
partialOutput := NewStringHexWriter(make([]byte, 0, 100))
|
||||
for reader.Len() > 0 {
|
||||
partialOutput.Reset()
|
||||
|
||||
finishSkip := func() {
|
||||
// end skip range, if necessary, so we can start a new bound that isn't a skip
|
||||
if skipping {
|
||||
skipping = false
|
||||
n.writeBound(partialOutput, prevBound)
|
||||
partialOutput.WriteByte(byte(SkipMode))
|
||||
}
|
||||
}
|
||||
|
||||
currBound, err := n.readBound(reader)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to decode bound: %w", err)
|
||||
}
|
||||
modeVal, err := readVarInt(reader)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to decode mode: %w", err)
|
||||
}
|
||||
mode := Mode(modeVal)
|
||||
|
||||
lower := prevIndex
|
||||
upper := n.storage.FindLowerBound(prevIndex, n.storage.Size(), currBound)
|
||||
|
||||
switch mode {
|
||||
case SkipMode:
|
||||
skipping = true
|
||||
|
||||
case FingerprintMode:
|
||||
theirFingerprint, err := reader.ReadString(FingerprintSize * 2)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read fingerprint: %w", err)
|
||||
}
|
||||
ourFingerprint := n.storage.Fingerprint(lower, upper)
|
||||
|
||||
if theirFingerprint == ourFingerprint {
|
||||
skipping = true
|
||||
} else {
|
||||
finishSkip()
|
||||
n.SplitRange(lower, upper, currBound, partialOutput)
|
||||
}
|
||||
|
||||
case IdListMode:
|
||||
numIds, err := readVarInt(reader)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to decode number of ids: %w", err)
|
||||
}
|
||||
|
||||
// what they have
|
||||
theirItems := make(map[string]struct{}, numIds)
|
||||
for i := 0; i < numIds; i++ {
|
||||
if id, err := reader.ReadString(64); err != nil {
|
||||
return "", fmt.Errorf("failed to read id (#%d/%d) in list: %w", i, numIds, err)
|
||||
} else {
|
||||
theirItems[id] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// what we have
|
||||
for _, item := range n.storage.Range(lower, upper) {
|
||||
id := item.ID
|
||||
|
||||
if _, theyHave := theirItems[id]; theyHave {
|
||||
// if we have and they have, ignore
|
||||
delete(theirItems, id)
|
||||
} else {
|
||||
// if we have and they don't, notify client
|
||||
if n.isClient {
|
||||
n.Haves <- id
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if n.isClient {
|
||||
// notify client of what they have and we don't
|
||||
for id := range theirItems {
|
||||
// skip empty strings here because those were marked to be excluded as such in the previous step
|
||||
n.HaveNots <- id
|
||||
}
|
||||
|
||||
// client got list of ids, it's done, skip
|
||||
skipping = true
|
||||
} else {
|
||||
// server got list of ids, reply with their own ids for the same range
|
||||
finishSkip()
|
||||
|
||||
responseIds := strings.Builder{}
|
||||
responseIds.Grow(64 * 100)
|
||||
responses := 0
|
||||
|
||||
endBound := currBound
|
||||
|
||||
for index, item := range n.storage.Range(lower, upper) {
|
||||
if n.frameSizeLimit-200 < fullOutput.Len()/2+responseIds.Len()/2 {
|
||||
endBound = Bound{item}
|
||||
upper = index
|
||||
break
|
||||
}
|
||||
responseIds.WriteString(item.ID)
|
||||
responses++
|
||||
}
|
||||
|
||||
n.writeBound(partialOutput, endBound)
|
||||
partialOutput.WriteByte(byte(IdListMode))
|
||||
writeVarInt(partialOutput, responses)
|
||||
partialOutput.WriteHex(responseIds.String())
|
||||
|
||||
fullOutput.WriteHex(partialOutput.Hex())
|
||||
partialOutput.Reset()
|
||||
}
|
||||
|
||||
default:
|
||||
return "", fmt.Errorf("unexpected mode %d", mode)
|
||||
}
|
||||
|
||||
if n.frameSizeLimit-200 < fullOutput.Len()/2+partialOutput.Len()/2 {
|
||||
// frame size limit exceeded, handle by encoding a boundary and fingerprint for the remaining range
|
||||
remainingFingerprint := n.storage.Fingerprint(upper, n.storage.Size())
|
||||
n.writeBound(fullOutput, InfiniteBound)
|
||||
fullOutput.WriteByte(byte(FingerprintMode))
|
||||
fullOutput.WriteHex(remainingFingerprint)
|
||||
|
||||
break // stop processing further
|
||||
} else {
|
||||
// append the constructed output for this iteration
|
||||
fullOutput.WriteHex(partialOutput.Hex())
|
||||
}
|
||||
|
||||
prevIndex = upper
|
||||
prevBound = currBound
|
||||
}
|
||||
|
||||
return fullOutput.Hex(), nil
|
||||
}
|
||||
|
||||
// SplitRange emits the reconciliation payload for the items in [lower, upper),
// whose upper delimiter is upperBound, into output. Small ranges are sent as a
// full id list; larger ranges are divided into `buckets` sub-ranges, each
// summarized by a fingerprint so the peer can recurse only into the buckets
// that mismatch.
func (n *Negentropy) SplitRange(lower, upper int, upperBound Bound, output *StringHexWriter) {
	numElems := upper - lower

	if numElems < buckets*2 {
		// we just send the full ids here
		n.writeBound(output, upperBound)
		output.WriteByte(byte(IdListMode))
		writeVarInt(output, numElems)

		for _, item := range n.storage.Range(lower, upper) {
			output.WriteHex(item.ID)
		}
	} else {
		itemsPerBucket := numElems / buckets
		bucketsWithExtra := numElems % buckets
		curr := lower

		for i := 0; i < buckets; i++ {
			bucketSize := itemsPerBucket
			if i < bucketsWithExtra {
				// spread the division remainder over the first buckets
				bucketSize++
			}
			ourFingerprint := n.storage.Fingerprint(curr, curr+bucketSize)
			curr += bucketSize

			var nextBound Bound
			if curr == upper {
				// last bucket reuses the caller-provided upper delimiter
				nextBound = upperBound
			} else {
				var prevItem, currItem Item

				// fetch the two items straddling the bucket boundary (indexes curr-1 and curr)
				for index, item := range n.storage.Range(curr-1, curr+1) {
					if index == curr-1 {
						prevItem = item
					} else {
						currItem = item
					}
				}

				// smallest bound that still separates prevItem from currItem,
				// to keep the encoded bound short
				minBound := getMinimalBound(prevItem, currItem)
				nextBound = minBound
			}

			n.writeBound(output, nextBound)
			output.WriteByte(byte(FingerprintMode))
			output.WriteHex(ourFingerprint)
		}
	}
}
|
||||
|
||||
func (n *Negentropy) Name() string {
|
||||
p := unsafe.Pointer(n)
|
||||
return fmt.Sprintf("%d", uintptr(p)&127)
|
||||
}
|
13
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/storage.go
generated
vendored
Normal file
13
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/storage.go
generated
vendored
Normal file
|
@ -0,0 +1,13 @@
|
|||
package negentropy
|
||||
|
||||
import (
|
||||
"iter"
|
||||
)
|
||||
|
||||
// Storage is the backing set of (timestamp, id) items a Negentropy instance
// reconciles against. Implementations are expected to present items in sorted
// order so Range and FindLowerBound are meaningful (see the vector package).
type Storage interface {
	// Size returns the total number of stored items.
	Size() int
	// Range yields (index, item) pairs for indexes in [begin, end).
	Range(begin, end int) iter.Seq2[int, Item]
	// FindLowerBound returns the first index in [begin, end) whose item does
	// not sort before value's item — i.e. the binary-search insertion point.
	FindLowerBound(begin, end int, value Bound) int
	// GetBound returns a bound for the item at idx; implementations may
	// return InfiniteBound when idx is past the end.
	GetBound(idx int) Bound
	// Fingerprint returns the hex-encoded fingerprint of items in [begin, end).
	Fingerprint(begin, end int) string
}
|
49
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector/accumulator.go
generated
vendored
Normal file
49
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector/accumulator.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
|||
package vector
|
||||
|
||||
import (
|
||||
"crypto/sha256"
|
||||
"encoding/binary"
|
||||
"encoding/hex"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr/nip77/negentropy"
|
||||
)
|
||||
|
||||
// Accumulator sums 256-bit little-endian values modulo 2^256, used to build
// range fingerprints from raw event id bytes.
type Accumulator struct {
	Buf [32 + 8]byte // leave 8 bytes at the end as a slack for use in GetFingerprint append()
}
|
||||
|
||||
func (acc *Accumulator) Reset() {
|
||||
for i := 0; i < 32; i++ {
|
||||
acc.Buf[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// AddAccumulator folds another accumulator's 256-bit value into this one
// (addition modulo 2^256).
func (acc *Accumulator) AddAccumulator(other Accumulator) {
	acc.AddBytes(other.Buf[:32])
}
|
||||
|
||||
func (acc *Accumulator) AddBytes(other []byte) {
|
||||
var currCarry, nextCarry uint32
|
||||
|
||||
for i := 0; i < 8; i++ {
|
||||
offset := i * 4
|
||||
orig := binary.LittleEndian.Uint32(acc.Buf[offset:])
|
||||
otherV := binary.LittleEndian.Uint32(other[offset:])
|
||||
|
||||
next := orig + currCarry + otherV
|
||||
if next < orig || next < otherV {
|
||||
nextCarry = 1
|
||||
}
|
||||
|
||||
binary.LittleEndian.PutUint32(acc.Buf[offset:32], next&0xFFFFFFFF)
|
||||
currCarry = nextCarry
|
||||
nextCarry = 0
|
||||
}
|
||||
}
|
||||
|
||||
// GetFingerprint hashes the current 256-bit sum together with the element
// count n and returns the first FingerprintSize bytes, hex-encoded.
func (acc *Accumulator) GetFingerprint(n int) string {
	input := acc.Buf[:32]
	// for typical n this append lands in Buf's 8 slack bytes, avoiding an
	// extra allocation (see the Buf field comment)
	input = append(input, negentropy.EncodeVarInt(n)...)
	hash := sha256.Sum256(input)
	return hex.EncodeToString(hash[:negentropy.FingerprintSize])
}
|
77
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector/vector.go
generated
vendored
Normal file
77
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector/vector.go
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
|||
package vector
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"iter"
|
||||
"slices"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip77/negentropy"
|
||||
)
|
||||
|
||||
// Vector is an in-memory, slice-backed implementation of the negentropy
// Storage interface: items are appended with Insert and must be sorted via
// Seal before being queried.
type Vector struct {
	items  []negentropy.Item
	sealed bool // set once Seal has sorted items

	acc Accumulator // scratch accumulator reused by Fingerprint
}
|
||||
|
||||
func New() *Vector {
|
||||
return &Vector{
|
||||
items: make([]negentropy.Item, 0, 30),
|
||||
}
|
||||
}
|
||||
|
||||
func (v *Vector) Insert(createdAt nostr.Timestamp, id string) {
|
||||
if len(id) != 64 {
|
||||
panic(fmt.Errorf("bad id size for added item: expected %d bytes, got %d", 32, len(id)/2))
|
||||
}
|
||||
|
||||
item := negentropy.Item{Timestamp: createdAt, ID: id}
|
||||
v.items = append(v.items, item)
|
||||
}
|
||||
|
||||
// Size returns the number of items currently stored.
func (v *Vector) Size() int { return len(v.items) }
|
||||
|
||||
// Seal marks the vector as finalized and sorts its items into the order
// required by the Storage interface. Sealing twice panics.
func (v *Vector) Seal() {
	if v.sealed {
		panic("trying to seal an already sealed vector")
	}
	v.sealed = true
	slices.SortFunc(v.items, negentropy.ItemCompare)
}
|
||||
|
||||
func (v *Vector) GetBound(idx int) negentropy.Bound {
|
||||
if idx < len(v.items) {
|
||||
return negentropy.Bound{Item: v.items[idx]}
|
||||
}
|
||||
return negentropy.InfiniteBound
|
||||
}
|
||||
|
||||
func (v *Vector) Range(begin, end int) iter.Seq2[int, negentropy.Item] {
|
||||
return func(yield func(int, negentropy.Item) bool) {
|
||||
for i := begin; i < end; i++ {
|
||||
if !yield(i, v.items[i]) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FindLowerBound returns the first index in [begin, end) whose item does not
// sort before bound.Item — the binary-search insertion point — translated
// back into absolute index space.
func (v *Vector) FindLowerBound(begin, end int, bound negentropy.Bound) int {
	idx, _ := slices.BinarySearchFunc(v.items[begin:end], bound.Item, negentropy.ItemCompare)
	return begin + idx
}
|
||||
|
||||
// Fingerprint computes the fingerprint of items in [begin, end): the
// mod-2^256 sum of the raw 32-byte decoded ids, hashed together with the
// element count (see Accumulator.GetFingerprint).
func (v *Vector) Fingerprint(begin, end int) string {
	v.acc.Reset()

	tmp := make([]byte, 32)
	for _, item := range v.Range(begin, end) {
		// NOTE(review): the decode error is ignored; ids are assumed to be
		// valid 64-char hex (Insert only checks length) — confirm callers
		// never feed non-hex ids, or tmp may carry stale bytes here.
		hex.Decode(tmp, []byte(item.ID))
		v.acc.AddBytes(tmp)
	}

	return v.acc.GetFingerprint(end - begin)
}
|
55
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/types.go
generated
vendored
Normal file
55
vendor/github.com/nbd-wtf/go-nostr/nip77/negentropy/types.go
generated
vendored
Normal file
|
@ -0,0 +1,55 @@
|
|||
package negentropy
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
)
|
||||
|
||||
// FingerprintSize is the number of bytes of the SHA-256 digest kept as a
// range fingerprint.
const FingerprintSize = 16

// Mode is the wire-format discriminator for a negentropy range payload.
type Mode uint8

const (
	SkipMode        Mode = 0
	FingerprintMode Mode = 1
	IdListMode      Mode = 2
)

// String returns a human-readable label for the mode, or "<UNKNOWN-ERROR>"
// for values outside the defined set.
func (v Mode) String() string {
	names := [...]string{
		SkipMode:        "SKIP",
		FingerprintMode: "FINGERPRINT",
		IdListMode:      "IDLIST",
	}
	if int(v) < len(names) {
		return names[v]
	}
	return "<UNKNOWN-ERROR>"
}
|
||||
|
||||
// Item is a single reconcilable entry: a hex-encoded event id paired with
// its creation timestamp. Items order by Timestamp first, then by ID
// (see ItemCompare).
type Item struct {
	Timestamp nostr.Timestamp
	ID        string // hex-encoded event id
}
|
||||
|
||||
func ItemCompare(a, b Item) int {
|
||||
if a.Timestamp == b.Timestamp {
|
||||
return strings.Compare(a.ID, b.ID)
|
||||
}
|
||||
return cmp.Compare(a.Timestamp, b.Timestamp)
|
||||
}
|
||||
|
||||
// String implements fmt.Stringer for debugging output.
func (i Item) String() string { return fmt.Sprintf("Item<%d:%s>", i.Timestamp, i.ID) }
|
||||
|
||||
// Bound delimits one edge of a range of items; it embeds an Item so it sorts
// with the same rules. InfiniteBound (identified by its Timestamp) marks an
// upper limit past every item.
type Bound struct{ Item }

// String implements fmt.Stringer, special-casing the infinite bound.
func (b Bound) String() string {
	if b.Timestamp == InfiniteBound.Timestamp {
		return "Bound<infinite>"
	}
	return fmt.Sprintf("Bound<%d:%s>", b.Timestamp, b.ID)
}
|
144
vendor/github.com/nbd-wtf/go-nostr/nip77/nip77.go
generated
vendored
Normal file
144
vendor/github.com/nbd-wtf/go-nostr/nip77/nip77.go
generated
vendored
Normal file
|
@ -0,0 +1,144 @@
|
|||
package nip77
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/cespare/xxhash"
|
||||
"github.com/greatroar/blobloom"
|
||||
"github.com/nbd-wtf/go-nostr"
|
||||
"github.com/nbd-wtf/go-nostr/nip77/negentropy"
|
||||
"github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector"
|
||||
)
|
||||
|
||||
// NegentropySync runs a NIP-77 negentropy reconciliation between the local
// store and the relay at url for the given filter, then transfers the
// difference in both directions: events only we have are published "up" to
// the relay, events only the relay has are published "down" into the store.
// It returns when both transfer directions finish or on the first error.
func NegentropySync(ctx context.Context, store nostr.RelayStore, url string, filter nostr.Filter) error {
	id := "go-nostr-tmp" // for now we can't have more than one subscription in the same connection

	data, err := store.QuerySync(ctx, filter)
	if err != nil {
		return fmt.Errorf("failed to query our local store: %w", err)
	}

	// load every local (created_at, id) pair into a sealed vector so the
	// negentropy machine can fingerprint our side of the set
	vec := vector.New()
	neg := negentropy.New(vec, 1024*1024)
	for _, evt := range data {
		vec.Insert(evt.CreatedAt, evt.ID)
	}
	vec.Seal()

	// result carries the first terminal outcome (error or nil on completion).
	// NOTE(review): it is unbuffered and read exactly once at the bottom;
	// later sends (e.g. from the relay handler after completion) would block
	// their goroutine — confirm this is acceptable upstream.
	result := make(chan error)

	var r *nostr.Relay
	// the custom handler drives the reconciliation rounds: every NEG-MSG from
	// the relay is fed to Reconcile and the reply (if any) written back
	r, err = nostr.RelayConnect(ctx, url, nostr.WithCustomHandler(func(data []byte) {
		envelope := ParseNegMessage(data)
		if envelope == nil {
			return
		}
		switch env := envelope.(type) {
		case *OpenEnvelope, *CloseEnvelope:
			result <- fmt.Errorf("unexpected %s received from relay", env.Label())
			return
		case *ErrorEnvelope:
			result <- fmt.Errorf("relay returned a %s: %s", env.Label(), env.Reason)
			return
		case *MessageEnvelope:
			nextmsg, err := neg.Reconcile(env.Message)
			if err != nil {
				result <- fmt.Errorf("failed to reconcile: %w", err)
				return
			}

			// an empty message means this side has nothing more to send
			if nextmsg != "" {
				msgb, _ := MessageEnvelope{id, nextmsg}.MarshalJSON()
				r.Write(msgb)
			}
		}
	}))
	if err != nil {
		return err
	}

	// kick off the protocol with our initial message
	msg := neg.Start()
	open, _ := OpenEnvelope{id, filter, msg}.MarshalJSON()
	err = <-r.Write(open)
	if err != nil {
		return fmt.Errorf("failed to write to relay: %w", err)
	}

	// always tell the relay we're done with this negentropy subscription
	defer func() {
		clse, _ := CloseEnvelope{id}.MarshalJSON()
		r.Write(clse)
	}()

	// direction describes one transfer leg: ids arriving on items are queried
	// from source and published to target
	type direction struct {
		label  string
		items  chan string
		source nostr.RelayStore
		target nostr.RelayStore
	}

	wg := sync.WaitGroup{}
	pool := newidlistpool(50)
	for _, dir := range []direction{
		{"up", neg.Haves, store, r},     // ids we have, relay lacks
		{"down", neg.HaveNots, r, store}, // ids relay has, we lack
	} {
		wg.Add(1)
		go func(dir direction) {
			defer wg.Done()

			// bloom filter to avoid re-transferring ids reported twice
			seen := blobloom.NewOptimized(blobloom.Config{
				Capacity: 10000,
				FPRate:   0.01,
			})

			// doSync moves one batch of ids from source to target; callers
			// must wg.Add(1) before invoking it (it always calls wg.Done)
			doSync := func(ids []string) {
				defer wg.Done()
				defer pool.giveback(ids)

				if len(ids) == 0 {
					return
				}
				evtch, err := dir.source.QueryEvents(ctx, nostr.Filter{IDs: ids})
				if err != nil {
					result <- fmt.Errorf("error querying source on %s: %w", dir.label, err)
					return
				}
				for evt := range evtch {
					dir.target.Publish(ctx, *evt)
				}
			}

			// batch incoming ids into groups of 50, flushing each full batch
			// on its own goroutine
			ids := pool.grab()
			for item := range dir.items {
				h := xxhash.Sum64([]byte(item))
				if seen.Has(h) {
					continue
				}

				seen.Add(h)
				ids = append(ids, item)
				if len(ids) == 50 {
					wg.Add(1)
					go doSync(ids)
					ids = pool.grab()
				}
			}
			// flush the final partial batch synchronously
			wg.Add(1)
			doSync(ids)
		}(dir)
	}

	// completion path: when both directions drain, report success
	go func() {
		wg.Wait()
		result <- nil
	}()

	err = <-result
	if err != nil {
		return err
	}

	return nil
}
|
26
vendor/github.com/nbd-wtf/go-nostr/pool.go
generated
vendored
26
vendor/github.com/nbd-wtf/go-nostr/pool.go
generated
vendored
|
@ -178,6 +178,32 @@ func (pool *SimplePool) EnsureRelay(url string) (*Relay, error) {
|
|||
return relay, nil
|
||||
}
|
||||
|
||||
// PublishResult reports the outcome of publishing one event to one relay.
type PublishResult struct {
	Error    error  // nil on success
	RelayURL string // the url the publish was attempted against
	Relay    *Relay // the connected relay, or nil when connecting failed
}
|
||||
|
||||
func (pool *SimplePool) PublishMany(ctx context.Context, urls []string, evt Event) chan PublishResult {
|
||||
ch := make(chan PublishResult, len(urls))
|
||||
|
||||
go func() {
|
||||
for _, url := range urls {
|
||||
relay, err := pool.EnsureRelay(url)
|
||||
if err != nil {
|
||||
ch <- PublishResult{err, url, nil}
|
||||
} else {
|
||||
err = relay.Publish(ctx, evt)
|
||||
ch <- PublishResult{err, url, relay}
|
||||
}
|
||||
}
|
||||
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
// SubMany opens a subscription with the given filters to multiple relays
|
||||
// the subscriptions only end when the context is canceled
|
||||
func (pool *SimplePool) SubMany(
|
||||
|
|
3
vendor/github.com/nbd-wtf/go-nostr/relay.go
generated
vendored
3
vendor/github.com/nbd-wtf/go-nostr/relay.go
generated
vendored
|
@ -269,7 +269,7 @@ func (r *Relay) ConnectWithTLS(ctx context.Context, tlsConfig *tls.Config) error
|
|||
}
|
||||
case *ClosedEnvelope:
|
||||
if subscription, ok := r.Subscriptions.Load(subIdToSerial(env.SubscriptionID)); ok {
|
||||
subscription.dispatchClosed(env.Reason)
|
||||
subscription.handleClosed(env.Reason)
|
||||
}
|
||||
case *CountEnvelope:
|
||||
if subscription, ok := r.Subscriptions.Load(subIdToSerial(env.SubscriptionID)); ok && env.Count != nil && subscription.countResult != nil {
|
||||
|
@ -449,6 +449,7 @@ func (r *Relay) QueryEvents(ctx context.Context, filter Filter) (chan *Event, er
|
|||
case <-r.Context().Done():
|
||||
}
|
||||
sub.Unsub()
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
|
|
7
vendor/github.com/nbd-wtf/go-nostr/subscription.go
generated
vendored
7
vendor/github.com/nbd-wtf/go-nostr/subscription.go
generated
vendored
|
@ -34,7 +34,6 @@ type Subscription struct {
|
|||
match func(*Event) bool // this will be either Filters.Match or Filters.MatchIgnoringTimestampConstraints
|
||||
live atomic.Bool
|
||||
eosed atomic.Bool
|
||||
closed atomic.Bool
|
||||
cancel context.CancelFunc
|
||||
|
||||
// this keeps track of the events we've received before the EOSE that we must dispatch before
|
||||
|
@ -108,13 +107,13 @@ func (sub *Subscription) dispatchEose() {
|
|||
}
|
||||
}
|
||||
|
||||
func (sub *Subscription) dispatchClosed(reason string) {
|
||||
if sub.closed.CompareAndSwap(false, true) {
|
||||
func (sub *Subscription) handleClosed(reason string) {
|
||||
go func() {
|
||||
sub.ClosedReason <- reason
|
||||
sub.live.Store(false) // set this so we don't send an unnecessary CLOSE to the relay
|
||||
sub.Unsub()
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// Unsub closes the subscription, sending "CLOSE" to relay as in NIP-01.
|
||||
// Unsub() also closes the channel sub.Events and makes a new one.
|
||||
|
|
35
vendor/github.com/nbd-wtf/go-nostr/tags.go
generated
vendored
35
vendor/github.com/nbd-wtf/go-nostr/tags.go
generated
vendored
|
@ -3,9 +3,9 @@ package nostr
|
|||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strings"
|
||||
|
||||
"iter"
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type Tag []string
|
||||
|
@ -53,7 +53,7 @@ func (tag Tag) Value() string {
|
|||
}
|
||||
|
||||
func (tag Tag) Relay() string {
|
||||
if (tag[0] == "e" || tag[0] == "p") && len(tag) > 2 {
|
||||
if len(tag) > 2 && (tag[0] == "e" || tag[0] == "p") {
|
||||
return NormalizeURL(tag[2])
|
||||
}
|
||||
return ""
|
||||
|
@ -103,7 +103,20 @@ func (tags Tags) GetAll(tagPrefix []string) Tags {
|
|||
return result
|
||||
}
|
||||
|
||||
// All returns an iterator over all the tags that match the prefix, yielding
// each matching tag together with its index; see [Tag.StartsWith].
func (tags Tags) All(tagPrefix []string) iter.Seq2[int, Tag] {
	return func(yield func(int, Tag) bool) {
		for i, v := range tags {
			if v.StartsWith(tagPrefix) {
				if !yield(i, v) {
					break
				}
			}
		}
	}
}
|
||||
|
||||
// FilterOut returns a new slice with only the elements that match the prefix, see [Tag.StartsWith]
|
||||
func (tags Tags) FilterOut(tagPrefix []string) Tags {
|
||||
filtered := make(Tags, 0, len(tags))
|
||||
for _, v := range tags {
|
||||
|
@ -114,6 +127,20 @@ func (tags Tags) FilterOut(tagPrefix []string) Tags {
|
|||
return filtered
|
||||
}
|
||||
|
||||
// FilterOutInPlace removes all tags that match the prefix, but potentially
// reorders the tags in unpredictable ways, see [Tag.StartsWith]. It mutates
// the receiver's slice header, truncating it by one for each removed tag.
func (tags *Tags) FilterOutInPlace(tagPrefix []string) {
	for i := 0; i < len(*tags); i++ {
		tag := (*tags)[i]
		if tag.StartsWith(tagPrefix) {
			// remove this by swapping the last tag into this place
			last := len(*tags) - 1
			(*tags)[i] = (*tags)[last]
			*tags = (*tags)[0:last]
			i-- // this is so we can match this just swapped item in the next iteration
		}
	}
}
|
||||
|
||||
// AppendUnique appends a tag if it doesn't exist yet, otherwise does nothing.
|
||||
// the uniqueness comparison is done based only on the first 2 elements of the tag.
|
||||
func (tags Tags) AppendUnique(tag Tag) Tags {
|
||||
|
|
2
vendor/github.com/nbd-wtf/go-nostr/utils.go
generated
vendored
2
vendor/github.com/nbd-wtf/go-nostr/utils.go
generated
vendored
|
@ -19,7 +19,7 @@ func IsValidRelayURL(u string) bool {
|
|||
}
|
||||
|
||||
func IsValid32ByteHex(thing string) bool {
|
||||
if strings.ToLower(thing) != thing {
|
||||
if !isLowerHex(thing) {
|
||||
return false
|
||||
}
|
||||
if len(thing) != 64 {
|
||||
|
|
6
vendor/github.com/valyala/fasthttp/.golangci.yml
generated
vendored
6
vendor/github.com/valyala/fasthttp/.golangci.yml
generated
vendored
|
@ -9,8 +9,10 @@ linters:
|
|||
enable-all: true
|
||||
disable:
|
||||
- cyclop
|
||||
- copyloopvar
|
||||
- depguard
|
||||
- dupl
|
||||
- err113
|
||||
- errname
|
||||
- errorlint
|
||||
- exhaustive
|
||||
|
@ -25,8 +27,10 @@ linters:
|
|||
- gomnd
|
||||
- gosec
|
||||
- inamedparam
|
||||
- intrange
|
||||
- ireturn
|
||||
- maintidx
|
||||
- mnd
|
||||
- nakedret
|
||||
- nestif
|
||||
- nlreturn
|
||||
|
@ -46,6 +50,8 @@ linters:
|
|||
# Deprecated linters
|
||||
- deadcode
|
||||
- exhaustivestruct
|
||||
- exportloopref
|
||||
- execinquery
|
||||
- golint
|
||||
- ifshort
|
||||
- interfacer
|
||||
|
|
60
vendor/github.com/valyala/fasthttp/header.go
generated
vendored
60
vendor/github.com/valyala/fasthttp/header.go
generated
vendored
|
@ -1509,9 +1509,9 @@ func (h *RequestHeader) setNonSpecial(key, value []byte) {
|
|||
// Multiple headers with the same key may be added with this function.
|
||||
// Use Set for setting a single header for the given key.
|
||||
//
|
||||
// the Content-Type, Content-Length, Connection, Server, Set-Cookie,
|
||||
// Transfer-Encoding and Date headers can only be set once and will
|
||||
// overwrite the previous value.
|
||||
// the Content-Type, Content-Length, Connection, Server, Transfer-Encoding
|
||||
// and Date headers can only be set once and will overwrite the previous value,
|
||||
// while Set-Cookie will not clear previous cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details),
|
||||
// it will be sent after the chunked response body.
|
||||
|
@ -1524,9 +1524,9 @@ func (h *ResponseHeader) Add(key, value string) {
|
|||
// Multiple headers with the same key may be added with this function.
|
||||
// Use SetBytesK for setting a single header for the given key.
|
||||
//
|
||||
// the Content-Type, Content-Length, Connection, Server, Set-Cookie,
|
||||
// Transfer-Encoding and Date headers can only be set once and will
|
||||
// overwrite the previous value.
|
||||
// the Content-Type, Content-Length, Connection, Server, Transfer-Encoding
|
||||
// and Date headers can only be set once and will overwrite the previous value,
|
||||
// while Set-Cookie will not clear previous cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details),
|
||||
// it will be sent after the chunked response body.
|
||||
|
@ -1539,9 +1539,9 @@ func (h *ResponseHeader) AddBytesK(key []byte, value string) {
|
|||
// Multiple headers with the same key may be added with this function.
|
||||
// Use SetBytesV for setting a single header for the given key.
|
||||
//
|
||||
// the Content-Type, Content-Length, Connection, Server, Set-Cookie,
|
||||
// Transfer-Encoding and Date headers can only be set once and will
|
||||
// overwrite the previous value.
|
||||
// the Content-Type, Content-Length, Connection, Server, Transfer-Encoding
|
||||
// and Date headers can only be set once and will overwrite the previous value,
|
||||
// while Set-Cookie will not clear previous cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details),
|
||||
// it will be sent after the chunked response body.
|
||||
|
@ -1554,9 +1554,9 @@ func (h *ResponseHeader) AddBytesV(key string, value []byte) {
|
|||
// Multiple headers with the same key may be added with this function.
|
||||
// Use SetBytesKV for setting a single header for the given key.
|
||||
//
|
||||
// the Content-Type, Content-Length, Connection, Server, Set-Cookie,
|
||||
// Transfer-Encoding and Date headers can only be set once and will
|
||||
// overwrite the previous value.
|
||||
// the Content-Type, Content-Length, Connection, Server, Transfer-Encoding
|
||||
// and Date headers can only be set once and will overwrite the previous value,
|
||||
// while the Set-Cookie header will not clear previous cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details),
|
||||
// it will be sent after the chunked response body.
|
||||
|
@ -1571,6 +1571,9 @@ func (h *ResponseHeader) AddBytesKV(key, value []byte) {
|
|||
|
||||
// Set sets the given 'key: value' header.
|
||||
//
|
||||
// Please note that the Set-Cookie header will not clear previous cookies,
|
||||
// use SetCookie instead to reset cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details),
|
||||
// it will be sent after the chunked response body.
|
||||
//
|
||||
|
@ -1582,6 +1585,9 @@ func (h *ResponseHeader) Set(key, value string) {
|
|||
|
||||
// SetBytesK sets the given 'key: value' header.
|
||||
//
|
||||
// Please note that the Set-Cookie header will not clear previous cookies,
|
||||
// use SetCookie instead to reset cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details),
|
||||
// it will be sent after the chunked response body.
|
||||
//
|
||||
|
@ -1593,6 +1599,9 @@ func (h *ResponseHeader) SetBytesK(key []byte, value string) {
|
|||
|
||||
// SetBytesV sets the given 'key: value' header.
|
||||
//
|
||||
// Please note that the Set-Cookie header will not clear previous cookies,
|
||||
// use SetCookie instead to reset cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details),
|
||||
// it will be sent after the chunked response body.
|
||||
//
|
||||
|
@ -1604,6 +1613,9 @@ func (h *ResponseHeader) SetBytesV(key string, value []byte) {
|
|||
|
||||
// SetBytesKV sets the given 'key: value' header.
|
||||
//
|
||||
// Please note that the Set-Cookie header will not clear previous cookies,
|
||||
// use SetCookie instead to reset cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details),
|
||||
// it will be sent after the chunked response body.
|
||||
//
|
||||
|
@ -1617,6 +1629,9 @@ func (h *ResponseHeader) SetBytesKV(key, value []byte) {
|
|||
// SetCanonical sets the given 'key: value' header assuming that
|
||||
// key is in canonical form.
|
||||
//
|
||||
// Please note that the Set-Cookie header will not clear previous cookies,
|
||||
// use SetCookie instead to reset cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details),
|
||||
// it will be sent after the chunked response body.
|
||||
func (h *ResponseHeader) SetCanonical(key, value []byte) {
|
||||
|
@ -1765,9 +1780,9 @@ func (h *RequestHeader) AddBytesV(key string, value []byte) {
|
|||
// Multiple headers with the same key may be added with this function.
|
||||
// Use SetBytesKV for setting a single header for the given key.
|
||||
//
|
||||
// the Content-Type, Content-Length, Connection, Cookie,
|
||||
// Transfer-Encoding, Host and User-Agent headers can only be set once
|
||||
// and will overwrite the previous value.
|
||||
// the Content-Type, Content-Length, Connection, Transfer-Encoding,
|
||||
// Host and User-Agent headers can only be set once and will overwrite
|
||||
// the previous value, while the Cookie header will not clear previous cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see AddTrailer for more details),
|
||||
// it will be sent after the chunked request body.
|
||||
|
@ -1782,6 +1797,9 @@ func (h *RequestHeader) AddBytesKV(key, value []byte) {
|
|||
|
||||
// Set sets the given 'key: value' header.
|
||||
//
|
||||
// Please note that the Cookie header will not clear previous cookies,
|
||||
// delete cookies before calling in order to reset cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details),
|
||||
// it will be sent after the chunked request body.
|
||||
//
|
||||
|
@ -1793,6 +1811,9 @@ func (h *RequestHeader) Set(key, value string) {
|
|||
|
||||
// SetBytesK sets the given 'key: value' header.
|
||||
//
|
||||
// Please note that the Cookie header will not clear previous cookies,
|
||||
// delete cookies before calling in order to reset cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details),
|
||||
// it will be sent after the chunked request body.
|
||||
//
|
||||
|
@ -1804,6 +1825,9 @@ func (h *RequestHeader) SetBytesK(key []byte, value string) {
|
|||
|
||||
// SetBytesV sets the given 'key: value' header.
|
||||
//
|
||||
// Please note that the Cookie header will not clear previous cookies,
|
||||
// delete cookies before calling in order to reset cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details),
|
||||
// it will be sent after the chunked request body.
|
||||
//
|
||||
|
@ -1815,6 +1839,9 @@ func (h *RequestHeader) SetBytesV(key string, value []byte) {
|
|||
|
||||
// SetBytesKV sets the given 'key: value' header.
|
||||
//
|
||||
// Please note that the Cookie header will not clear previous cookies,
|
||||
// delete cookies before calling in order to reset cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details),
|
||||
// it will be sent after the chunked request body.
|
||||
//
|
||||
|
@ -1828,6 +1855,9 @@ func (h *RequestHeader) SetBytesKV(key, value []byte) {
|
|||
// SetCanonical sets the given 'key: value' header assuming that
|
||||
// key is in canonical form.
|
||||
//
|
||||
// Please note that the Cookie header will not clear previous cookies,
|
||||
// delete cookies before calling in order to reset cookies.
|
||||
//
|
||||
// If the header is set as a Trailer (forbidden trailers will not be set, see SetTrailer for more details),
|
||||
// it will be sent after the chunked request body.
|
||||
func (h *RequestHeader) SetCanonical(key, value []byte) {
|
||||
|
|
10
vendor/github.com/valyala/fasthttp/http.go
generated
vendored
10
vendor/github.com/valyala/fasthttp/http.go
generated
vendored
|
@ -192,6 +192,16 @@ func (req *Request) SetConnectionClose() {
|
|||
req.Header.SetConnectionClose()
|
||||
}
|
||||
|
||||
// GetTimeOut retrieves the timeout duration set for the Request.
//
// This method returns a time.Duration that determines how long the request
// can wait before it times out. In the default use case, the timeout applies
// to the entire request lifecycle, including both receiving the response
// headers and the response body.
//
// NOTE(review): this returns the raw field; a zero value presumably means no
// per-request timeout has been set — confirm against the client's handling.
func (req *Request) GetTimeOut() time.Duration {
	return req.timeout
}
|
||||
|
||||
// SendFile registers file on the given path to be used as response body
|
||||
// when Write is called.
|
||||
//
|
||||
|
|
1
vendor/github.com/valyala/fasthttp/server.go
generated
vendored
1
vendor/github.com/valyala/fasthttp/server.go
generated
vendored
|
@ -2789,6 +2789,7 @@ func (ctx *RequestCtx) Value(key any) any {
|
|||
}
|
||||
|
||||
var fakeServer = &Server{
|
||||
done: make(chan struct{}),
|
||||
// Initialize concurrencyCh for TimeoutHandler
|
||||
concurrencyCh: make(chan struct{}, DefaultConcurrency),
|
||||
}
|
||||
|
|
14
vendor/github.com/valyala/fasthttp/uri.go
generated
vendored
14
vendor/github.com/valyala/fasthttp/uri.go
generated
vendored
|
@ -536,21 +536,11 @@ func shouldEscape(c byte, mode encoding) bool {
|
|||
}
|
||||
|
||||
func ishex(c byte) bool {
|
||||
return ('0' <= c && c <= '9') ||
|
||||
('a' <= c && c <= 'f') ||
|
||||
('A' <= c && c <= 'F')
|
||||
return hex2intTable[c] < 16
|
||||
}
|
||||
|
||||
func unhex(c byte) byte {
|
||||
switch {
|
||||
case '0' <= c && c <= '9':
|
||||
return c - '0'
|
||||
case 'a' <= c && c <= 'f':
|
||||
return c - 'a' + 10
|
||||
case 'A' <= c && c <= 'F':
|
||||
return c - 'A' + 10
|
||||
}
|
||||
return 0
|
||||
return hex2intTable[c] & 15
|
||||
}
|
||||
|
||||
// validOptionalPort reports whether port is either an empty string
|
||||
|
|
26
vendor/modules.txt
vendored
26
vendor/modules.txt
vendored
|
@ -5,6 +5,9 @@ git.devvul.com/asara/gologger
|
|||
## explicit; go 1.13
|
||||
github.com/andybalholm/brotli
|
||||
github.com/andybalholm/brotli/matchfinder
|
||||
# github.com/bep/debounce v1.2.1
|
||||
## explicit
|
||||
github.com/bep/debounce
|
||||
# github.com/btcsuite/btcd/btcec/v2 v2.3.4
|
||||
## explicit; go 1.17
|
||||
github.com/btcsuite/btcd/btcec/v2
|
||||
|
@ -12,6 +15,9 @@ github.com/btcsuite/btcd/btcec/v2/schnorr
|
|||
# github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0
|
||||
## explicit; go 1.17
|
||||
github.com/btcsuite/btcd/chaincfg/chainhash
|
||||
# github.com/cespare/xxhash v1.1.0
|
||||
## explicit
|
||||
github.com/cespare/xxhash
|
||||
# github.com/decred/dcrd/crypto/blake256 v1.1.0
|
||||
## explicit; go 1.17
|
||||
github.com/decred/dcrd/crypto/blake256
|
||||
|
@ -23,12 +29,12 @@ github.com/decred/dcrd/dcrec/secp256k1/v4/schnorr
|
|||
# github.com/fasthttp/websocket v1.5.10
|
||||
## explicit; go 1.20
|
||||
github.com/fasthttp/websocket
|
||||
# github.com/fiatjaf/eventstore v0.11.3
|
||||
# github.com/fiatjaf/eventstore v0.12.0
|
||||
## explicit; go 1.23.0
|
||||
github.com/fiatjaf/eventstore
|
||||
github.com/fiatjaf/eventstore/postgresql
|
||||
# github.com/fiatjaf/khatru v0.8.3
|
||||
## explicit; go 1.23.0
|
||||
# github.com/fiatjaf/khatru v0.9.1
|
||||
## explicit; go 1.23.1
|
||||
github.com/fiatjaf/khatru
|
||||
github.com/fiatjaf/khatru/policies
|
||||
# github.com/gobwas/httphead v0.1.0
|
||||
|
@ -45,6 +51,9 @@ github.com/gobwas/pool/pbytes
|
|||
github.com/gobwas/ws
|
||||
github.com/gobwas/ws/wsflate
|
||||
github.com/gobwas/ws/wsutil
|
||||
# github.com/greatroar/blobloom v0.8.0
|
||||
## explicit; go 1.14
|
||||
github.com/greatroar/blobloom
|
||||
# github.com/jmoiron/sqlx v1.4.0
|
||||
## explicit; go 1.10
|
||||
github.com/jmoiron/sqlx
|
||||
|
@ -81,11 +90,14 @@ github.com/mattn/go-colorable
|
|||
# github.com/mattn/go-isatty v0.0.20
|
||||
## explicit; go 1.15
|
||||
github.com/mattn/go-isatty
|
||||
# github.com/nbd-wtf/go-nostr v0.38.2
|
||||
## explicit; go 1.23.0
|
||||
# github.com/nbd-wtf/go-nostr v0.40.1
|
||||
## explicit; go 1.23.1
|
||||
github.com/nbd-wtf/go-nostr
|
||||
github.com/nbd-wtf/go-nostr/nip11
|
||||
github.com/nbd-wtf/go-nostr/nip42
|
||||
github.com/nbd-wtf/go-nostr/nip77
|
||||
github.com/nbd-wtf/go-nostr/nip77/negentropy
|
||||
github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector
|
||||
github.com/nbd-wtf/go-nostr/nip86
|
||||
# github.com/puzpuzpuz/xsync/v3 v3.4.0
|
||||
## explicit; go 1.18
|
||||
|
@ -114,8 +126,8 @@ github.com/tidwall/pretty
|
|||
# github.com/valyala/bytebufferpool v1.0.0
|
||||
## explicit
|
||||
github.com/valyala/bytebufferpool
|
||||
# github.com/valyala/fasthttp v1.56.0
|
||||
## explicit; go 1.20
|
||||
# github.com/valyala/fasthttp v1.57.0
|
||||
## explicit; go 1.21
|
||||
github.com/valyala/fasthttp
|
||||
github.com/valyala/fasthttp/fasthttputil
|
||||
github.com/valyala/fasthttp/stackless
|
||||
|
|
Loading…
Reference in a new issue