diff --git a/alby/utils.go b/alby/utils.go
new file mode 100644
index 0000000..5748dd7
--- /dev/null
+++ b/alby/utils.go
@@ -0,0 +1,308 @@
+package alby
+
+import (
+	"context"
+	"encoding/json"
+	"sync"
+	"time"
+
+	"git.devvul.com/asara/gologger"
+	"git.devvul.com/asara/well-goknown/config"
+	"github.com/nbd-wtf/go-nostr"
+	"github.com/nbd-wtf/go-nostr/nip04"
+)
+
+// checkEvent verifies that the given JSON-encoded nostr event is well
+// formed and carries a valid signature.
+func checkEvent(n string) bool {
+	var zapEvent ZapEvent
+	l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger()
+
+	err := json.Unmarshal([]byte(n), &zapEvent)
+	if err != nil {
+		l.Debug().Msgf("unable to unmarshal nwc value: %s", err.Error())
+		return false
+	}
+
+	evt := nostr.Event{
+		ID:        zapEvent.Id,
+		PubKey:    zapEvent.Pubkey,
+		CreatedAt: zapEvent.CreatedAt,
+		Kind:      zapEvent.Kind,
+		Tags:      zapEvent.Tags,
+		Content:   zapEvent.Content,
+		Sig:       zapEvent.Signature,
+	}
+
+	ok, err := evt.CheckSignature()
+	if err != nil {
+		l.Debug().Msgf("unable to check event signature: %s", err.Error())
+		return false
+	}
+	if !ok {
+		l.Debug().Msgf("event signature is invalid")
+		return false
+	}
+	return true
+}
+
+// watchForReceipt is a background task that polls the wallet until the
+// invoice for the given zap request is paid, then publishes the zap
+// receipt. It stops polling on payment or on a permanent failure.
+func watchForReceipt(nEvent string, secret NWCSecret, invoice string) {
+	var zapEvent ZapEvent
+	l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger()
+
+	ok := checkEvent(nEvent)
+	if !ok {
+		l.Debug().Msgf("nostr event is not valid")
+		return
+	}
+
+	err := json.Unmarshal([]byte(nEvent), &zapEvent)
+	if err != nil {
+		l.Debug().Msgf("unable to unmarshal nwc value: %s", err.Error())
+		return
+	}
+
+	ticker := time.NewTicker(30 * time.Second)
+	quit := make(chan struct{})
+
+	// quit is closed exactly once, by the poller itself, so the
+	// goroutine cannot leak
+	go func() {
+		defer ticker.Stop()
+		for {
+			select {
+			case <-quit:
+				return
+			case <-ticker.C:
+				paid, failed, result := checkInvoicePaid(invoice, secret, nEvent)
+				if failed {
+					close(quit)
+					return
+				}
+				if paid {
+					sendReceipt(secret, result, nEvent)
+					close(quit)
+					return
+				}
+			}
+		}
+	}()
+}
+
+// checkInvoicePaid asks the NWC wallet whether checkInvoice has been
+// settled. It returns (paid, failed, response); failed reports a
+// permanent error or an expired invoice so the caller can stop polling.
+func checkInvoicePaid(checkInvoice string, secret NWCSecret, nEvent string) (bool, bool, LookupInvoiceResponse) {
+	l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger()
+	invoiceParams := LookupInvoiceParams{
+		Invoice: checkInvoice,
+	}
+
+	invoice := LookupInvoice{
+		Method: "lookup_invoice",
+		Params: invoiceParams,
+	}
+
+	invoiceJson, err := json.Marshal(invoice)
+	if err != nil {
+		l.Debug().Msgf("unable to marshal invoice: %s", err.Error())
+		return false, true, LookupInvoiceResponse{}
+	}
+
+	// generate nip-04 shared secret
+	sharedSecret, err := nip04.ComputeSharedSecret(secret.AppPubkey, secret.Secret)
+	if err != nil {
+		l.Debug().Msgf("unable to compute shared secret: %s", err.Error())
+		return false, true, LookupInvoiceResponse{}
+	}
+
+	// create the encrypted content payload
+	encryptedContent, err := nip04.Encrypt(string(invoiceJson), sharedSecret)
+	if err != nil {
+		l.Debug().Msgf("unable to encrypt lookup request: %s", err.Error())
+		return false, true, LookupInvoiceResponse{}
+	}
+
+	recipient := nostr.Tag{"p", secret.AppPubkey}
+	nwcEv := nostr.Event{
+		PubKey:    secret.AppPubkey,
+		CreatedAt: nostr.Now(),
+		Kind:      nostr.KindNWCWalletRequest,
+		Tags:      nostr.Tags{recipient},
+		Content:   encryptedContent,
+	}
+
+	// sign the message with the app token
+	if err := nwcEv.Sign(secret.Secret); err != nil {
+		l.Debug().Msgf("unable to sign lookup request: %s", err.Error())
+		return false, true, LookupInvoiceResponse{}
+	}
+
+	// filter for the wallet's response to this request
+	var filters nostr.Filters
+	t := make(map[string][]string)
+	t["e"] = []string{nwcEv.GetID()}
+	filters = []nostr.Filter{
+		{
+			Kinds: []int{
+				nostr.KindNWCWalletResponse,
+			},
+			Tags: t,
+		},
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+
+	relay, err := nostr.RelayConnect(ctx, secret.Relay)
+	if err != nil {
+		l.Debug().Msgf("unable to connect to relay: %s", err.Error())
+		return false, false, LookupInvoiceResponse{}
+	}
+	defer relay.Close()
+
+	subCtx, subCancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer subCancel()
+
+	// subscribe to the filter
+	sub, err := relay.Subscribe(subCtx, filters)
+	if err != nil {
+		l.Debug().Msgf("unable to subscribe to relay: %s", err.Error())
+		return false, false, LookupInvoiceResponse{}
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+
+	// watch for the wallet response
+	evs := make([]nostr.Event, 0)
+	go func() {
+		defer wg.Done()
+		for {
+			select {
+			case ev, ok := <-sub.Events:
+				if !ok {
+					l.Debug().Msgf("subscription events channel is closed")
+					return
+				}
+				if ev.Kind != 0 {
+					evs = append(evs, *ev)
+				}
+				if len(evs) > 0 {
+					return
+				}
+			case <-sub.EndOfStoredEvents:
+				l.Trace().Msgf("end of stored events received")
+			case <-ctx.Done():
+				l.Debug().Msgf("subscription context cancelled or done: %v", ctx.Err())
+				return
+			}
+		}
+	}()
+
+	// publish the invoice request
+	if err := relay.Publish(ctx, nwcEv); err != nil {
+		l.Debug().Msgf("unable to publish event: %s", err.Error())
+		return false, false, LookupInvoiceResponse{}
+	}
+	// wait for the response or the context timeout
+	wg.Wait()
+
+	// no response arrived in time; retry on the next tick
+	if len(evs) == 0 {
+		return false, false, LookupInvoiceResponse{}
+	}
+
+	// decrypt the response
+	response, err := nip04.Decrypt(evs[0].Content, sharedSecret)
+	if err != nil {
+		l.Debug().Msgf("unable to decrypt invoice response: %s", err.Error())
+		return false, true, LookupInvoiceResponse{}
+	}
+	resStruct := LookupInvoiceResponse{}
+	err = json.Unmarshal([]byte(response), &resStruct)
+	if err != nil {
+		l.Debug().Msgf("unable to unmarshal invoice response: %s", err.Error())
+		return false, true, LookupInvoiceResponse{}
+	}
+
+	if settled := resStruct.Result.isSettled(); settled {
+		return true, false, resStruct
+	}
+
+	if expired := resStruct.Result.isExpired(); expired {
+		return false, true, LookupInvoiceResponse{}
+	}
+
+	return false, false, LookupInvoiceResponse{}
+}
+
+// sendReceipt signs a zap receipt (NIP-57, kind 9735) for the settled
+// invoice and publishes it to every relay listed in the zap request's
+// "relays" tag.
+func sendReceipt(secret NWCSecret, result LookupInvoiceResponse, nEvent string) {
+	l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger()
+	var zapRequestEvent ZapEvent
+	err := json.Unmarshal([]byte(nEvent), &zapRequestEvent)
+	if err != nil {
+		l.Debug().Msgf("unable to unmarshal zap request: %s", err.Error())
+		return
+	}
+
+	zapReceipt := nostr.Event{
+		PubKey:    secret.ClientPubkey,
+		CreatedAt: result.Result.SettledAt,
+		Kind:      nostr.KindZap, // zap receipts are kind 9735, not an ephemeral NWC kind
+		Tags:      zapRequestEvent.Tags,
+		Content:   "",
+	}
+
+	// add context to zapReceipt
+	sender := nostr.Tag{"P", zapRequestEvent.Pubkey}
+	bolt11 := nostr.Tag{"bolt11", result.Result.Invoice}
+	preimage := nostr.Tag{"preimage", result.Result.Preimage}
+	description := nostr.Tag{"description", nEvent}
+
+	zapReceipt.Tags = zapReceipt.Tags.AppendUnique(sender)
+	zapReceipt.Tags = zapReceipt.Tags.AppendUnique(bolt11)
+	zapReceipt.Tags = zapReceipt.Tags.AppendUnique(preimage)
+	zapReceipt.Tags = zapReceipt.Tags.AppendUnique(description)
+
+	// remove unneeded values from tags
+	zapReceipt.Tags = zapReceipt.Tags.FilterOut([]string{"relays"})
+	zapReceipt.Tags = zapReceipt.Tags.FilterOut([]string{"alt"})
+
+	// sign the receipt
+	if err := zapReceipt.Sign(secret.Secret); err != nil {
+		l.Debug().Msgf("unable to sign zap receipt: %s", err.Error())
+		return
+	}
+
+	// send it to the relays listed in the zap request
+	relayTag := zapRequestEvent.Tags.GetFirst([]string{"relays"})
+	if relayTag == nil {
+		l.Debug().Msgf("zap request has no relays tag, nowhere to publish")
+		return
+	}
+
+	ctx := context.Background()
+	// skip the first element of the tag: it is the "relays" label itself
+	for _, url := range (*relayTag)[1:] {
+		relay, err := nostr.RelayConnect(ctx, url)
+		if err != nil {
+			l.Debug().Msgf("unable to connect to relay (%s): %s", url, err.Error())
+			continue
+		}
+		if err := relay.Publish(ctx, zapReceipt); err != nil {
+			l.Debug().Msgf("unable to publish to relay (%s): %s", url, err.Error())
+			relay.Close()
+			continue
+		}
+		relay.Close()
+
+		l.Debug().Msgf("published receipt to: %s", url)
+	}
+}
diff --git a/alby/well-known.go b/alby/well-known.go
index 45690bc..63044b7 100644
--- a/alby/well-known.go
+++ b/alby/well-known.go
@@ -2,6 +2,8 @@ package alby
 
 import (
 	"context"
+	"crypto/sha256"
+	"encoding/hex"
 	"encoding/json"
 	"fmt"
 	"net"
@@ -25,9 +27,9 @@ var (
 )
 
 type AlbyApp struct {
-	Id          int32  `json:"id"`
-	Name        string `json:"name"`
-	NostrPubkey string `json:"nostrPubkey"`
+	Id        int32  `json:"id"`
+	Name      string `json:"name"`
+	AppPubkey string `json:"appPubkey"`
 }
 
 type AlbyApps []AlbyApp
@@ -49,14 +51,14 @@ type lnurlpError struct {
 	Reason string `json:"reason"`
 }
 
-type NWCReqNostr struct {
-	Id        string     `json:"id"`
-	Pubkey    string     `json:"pubkey"`
-	CreatedAt int64      `json:"created_at"`
-	Kind      int32      `json:"kind"`
-	Tags      [][]string `json:"tags"`
-	Content   string     `json:"content"`
-	Signature string     `json:"sig"`
+type ZapEvent struct {
+	Id        string          `json:"id"`
+	Pubkey    string          `json:"pubkey"`
+	CreatedAt nostr.Timestamp `json:"created_at"`
+	Kind      int             `json:"kind"`
+	Tags      nostr.Tags      `json:"tags"`
+	Content   string          `json:"content"`
+	Signature string          `json:"sig"`
 }
 
 type NWCReq struct {
@@ -88,6 +90,50 @@ func (s *NWCSecret) decodeSecret() {
 
 }
 
+type LookupInvoiceParams struct {
+	Invoice string `json:"invoice"`
+}
+
+type LookupInvoice struct {
+	Method string              `json:"method"`
+	Params LookupInvoiceParams `json:"params"`
+}
+
+type LookupInvoiceResponseResult struct {
+	Type            string          `json:"type"`
+	State           string          `json:"state"`
+	Invoice         string          `json:"invoice"`
+	Description     string          `json:"description"`
+	DescriptionHash string          `json:"description_hash"`
+	Preimage        string          `json:"preimage"`
+	PaymentHash     string          `json:"payment_hash"`
+	Amount          int64           `json:"amount"`
+	FeesPaid        int64           `json:"fees_paid"`
+	CreatedAt       nostr.Timestamp `json:"created_at"`
+	ExpiresAt       nostr.Timestamp `json:"expires_at"`
+	SettledAt       nostr.Timestamp `json:"settled_at"`
+	Metadata        string          `json:"metadata"`
+}
+
+// isExpired reports whether the invoice's expiry timestamp has passed.
+func (s *LookupInvoiceResponseResult) isExpired() bool {
+	// compare in unix seconds; ExpiresAt is a nostr.Timestamp
+	// NOTE(review): a zero/absent expires_at reads as already expired
+	return time.Now().Unix() > s.ExpiresAt.Time().Unix()
+}
+
+// isSettled reports whether the invoice carries a settlement timestamp;
+// a zero settled_at means the invoice has not been paid yet.
+func (s *LookupInvoiceResponseResult) isSettled() bool {
+	// the wallet sets settled_at only once the payment completes
+	return s.SettledAt.Time().Unix() != 0
+}
+
+type LookupInvoiceResponse struct {
+	Result     LookupInvoiceResponseResult `json:"result"`
+	ResultType string                      `json:"result_type"`
+}
+
 type MakeInvoiceParams struct {
 	Amount          int64  `json:"amount"`
 	Description     string `json:"description"`
@@ -171,12 +217,12 @@ func GetLnurlp(w http.ResponseWriter, r *http.Request) {
 	var npk string
 	for _, element := range albyApps {
 		if element.Name == name {
-			npk = element.NostrPubkey
+			npk = element.AppPubkey
 		}
 	}
 
 	if len(npk) == 0 {
-		l.Debug().Msgf("user doesn't exist in alby %s@%s: %s", name, domain, err.Error())
+		l.Debug().Msgf("user doesn't exist in alby %s@%s", name, domain)
 		lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "user does not exist"}
 		retError, _ := json.Marshal(lnurlpReturnError)
 		w.WriteHeader(http.StatusNotFound)
@@ -207,7 +253,7 @@ func GetLnurlp(w http.ResponseWriter, r *http.Request) {
 		MaxSendable:    10000000,
 		Metadata:       fmt.Sprintf("[[\"text/plain\", \"ln address payment to %s on the devvul server\"],[\"text/identifier\", \"%s@%s\"]]", name, name, domain),
 		AllowsNostr:    true,
-		NostrPubkey:    secret.AppPubkey,
+		NostrPubkey:    secret.ClientPubkey,
 	}
 
 	ret, err := json.Marshal(lnurlpReturn)
@@ -229,7 +275,7 @@ func GetLnurlp(w http.ResponseWriter, r *http.Request) {
 func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
 	l := gologger.Get(config.GetConfig().LogLevel).With().Caller().Logger()
 	var nwc NWCReq
-	var nwcNostr NWCReqNostr
+	var zapEvent ZapEvent
 
 	// normalize domain
 	domain, _, err := net.SplitHostPort(r.Host)
@@ -257,14 +303,26 @@ func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
 	}
 	secret.decodeSecret()
 
-	err = json.Unmarshal([]byte(nwc.Nostr), &nwcNostr)
-	if err != nil {
-		l.Debug().Msgf("unable to unmarshal nwc value: %s", err.Error())
-		lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "unable to connect to relay"}
-		retError, _ := json.Marshal(lnurlpReturnError)
-		w.WriteHeader(http.StatusNotFound)
-		w.Write(retError)
-		return
+	// if there is a nostr payload unmarshal it
+	if nwc.Nostr != "" {
+		err = json.Unmarshal([]byte(nwc.Nostr), &zapEvent)
+		if err != nil {
+			l.Debug().Msgf("unable to unmarshal nwc value: %s", err.Error())
+			lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "unable to connect to relay"}
+			retError, _ := json.Marshal(lnurlpReturnError)
+			w.WriteHeader(http.StatusNotFound)
+			w.Write(retError)
+			return
+		}
+		ok := checkEvent(nwc.Nostr)
+		if !ok {
+			l.Debug().Msgf("nostr event is not valid")
+			lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "check your request and try again"}
+			retError, _ := json.Marshal(lnurlpReturnError)
+			w.WriteHeader(http.StatusNotFound)
+			w.Write(retError)
+			return
+		}
 	}
 
 	// connect to the relay
@@ -291,10 +349,20 @@ func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
+	// NIP-57: when a zap request is present the invoice description is
+	// the raw request JSON and the description hash is its sha256;
+	// otherwise hash keeps its empty zero value
+	var hash string
+	if nwc.Nostr != "" {
+		sha := sha256.Sum256([]byte(nwc.Nostr))
+		hash = hex.EncodeToString(sha[:])
+	}
+
 	invoiceParams := MakeInvoiceParams{
-		Amount:      amt,
-		Description: nwc.Comment,
-		Expiry:      300,
+		Amount:          amt,
+		Description:     nwc.Nostr,
+		DescriptionHash: hash,
+		Expiry:          300,
 	}
 
 	invoice := MakeInvoice{
@@ -302,7 +370,6 @@ func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
 		Params: invoiceParams,
 	}
 
-	// marshal the json
 	invoiceJson, err := json.Marshal(invoice)
 	if err != nil {
 		l.Debug().Msgf("unable to marshal invoice: %s", err.Error())
@@ -337,7 +404,7 @@ func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
 
 	recipient := nostr.Tag{"p", secret.AppPubkey}
 	nwcEv := nostr.Event{
-		PubKey:    nwcNostr.Pubkey,
+		PubKey:    secret.ClientPubkey,
 		CreatedAt: nostr.Now(),
 		Kind:      nostr.KindNWCWalletRequest,
 		Tags:      nostr.Tags{recipient},
@@ -359,7 +426,7 @@ func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
 		},
 	}
 
-	subCtx, subCancel := context.WithTimeout(context.Background(), 10*time.Second)
+	subCtx, subCancel := context.WithTimeout(context.Background(), 5*time.Second)
 	defer subCancel()
 	sub, err := relay.Subscribe(subCtx, filters)
 	if err != nil {
@@ -374,7 +441,7 @@ func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
 	var wg sync.WaitGroup
 	wg.Add(1)
 
-	// append new messages to slice
+	// watch for the invoice
 	evs := make([]nostr.Event, 0, 1)
 	go func() {
 		defer wg.Done()
@@ -400,6 +467,7 @@ func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
 		}
 	}()
 
+	// publish the invoice request
 	if err := relay.Publish(relayCtx, nwcEv); err != nil {
 		l.Debug().Msgf("unable to marshal invoice: %s", err.Error())
 		lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "unable to create an invoice"}
@@ -408,11 +476,13 @@ func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
 		w.Write(retError)
 		return
 	}
+	// wait for the invoice to get returned
 	wg.Wait()
 
+	// decrypt the invoice
 	response, err := nip04.Decrypt(evs[0].Content, sharedSecret)
 	resStruct := MakeInvoiceResponse{}
-	json.Unmarshal([]byte(response), &resStruct)
+	err = json.Unmarshal([]byte(response), &resStruct)
 	if err != nil {
 		l.Debug().Msgf("unable to create invoice: %s", err.Error())
 		lnurlpReturnError := &lnurlpError{Status: "ERROR", Reason: "unable to connect to relay"}
@@ -427,6 +497,7 @@ func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
 		Routes:  []int{},
 	}
 
+	// return the invoice to the requester
 	ret, err := json.Marshal(retStruct)
 	if err != nil {
 		l.Error().Msgf("unable to marshal json for invoice: %s", err.Error())
@@ -436,6 +507,10 @@ func GetLnurlpCallback(w http.ResponseWriter, r *http.Request) {
 		w.Write(retError)
 		return
 	}
+	if nwc.Nostr != "" {
+		l.Debug().Msgf("starting background job for invoice")
+		go watchForReceipt(nwc.Nostr, secret, retStruct.Invoice)
+	}
 
 	l.Info().Msg("returning lnurl-p payload")
 	w.WriteHeader(http.StatusOK)
diff --git a/go.mod b/go.mod
index 08dc8ee..fdb026f 100644
--- a/go.mod
+++ b/go.mod
@@ -4,12 +4,13 @@ go 1.23.3
 
 require (
 	git.devvul.com/asara/gologger v0.9.0
-	github.com/fiatjaf/eventstore v0.14.4
-	github.com/fiatjaf/khatru v0.14.0
+	github.com/davecgh/go-spew v1.1.1
+	github.com/fiatjaf/eventstore v0.16.0
+	github.com/fiatjaf/khatru v0.15.0
 	github.com/gorilla/schema v1.4.1
 	github.com/jmoiron/sqlx v1.4.0
 	github.com/lib/pq v1.10.9
-	github.com/nbd-wtf/go-nostr v0.45.0
+	github.com/nbd-wtf/go-nostr v0.48.0
 )
 
 require (
@@ -18,12 +19,10 @@ require (
 	github.com/bep/debounce v1.2.1 // indirect
 	github.com/btcsuite/btcd/btcec/v2 v2.3.4 // indirect
 	github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 // indirect
+	github.com/coder/websocket v1.8.12 // indirect
 	github.com/decred/dcrd/crypto/blake256 v1.1.0 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
 	github.com/fasthttp/websocket v1.5.12 // indirect
-	github.com/gobwas/httphead v0.1.0 // indirect
-	github.com/gobwas/pool v0.2.1 // indirect
-	github.com/gobwas/ws v1.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/klauspost/compress v1.17.11 // indirect
@@ -32,7 +31,7 @@ require (
 	github.com/mattn/go-isatty v0.0.20 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
-	github.com/puzpuzpuz/xsync/v3 v3.4.0 // indirect
+	github.com/puzpuzpuz/xsync/v3 v3.4.1 // indirect
 	github.com/rs/cors v1.11.1 // indirect
 	github.com/rs/zerolog v1.33.0 // indirect
 	github.com/savsgio/gotils v0.0.0-20240704082632-aef3928b8a38 // indirect
diff --git a/go.sum b/go.sum
index e53489b..ed7456a 100644
--- a/go.sum
+++ b/go.sum
@@ -12,6 +12,8 @@ github.com/btcsuite/btcd/btcec/v2 v2.3.4 h1:3EJjcN70HCu/mwqlUsGK8GcNVyLVxFDlWurT
 github.com/btcsuite/btcd/btcec/v2 v2.3.4/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04=
 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0 h1:59Kx4K6lzOW5w6nFlA0v5+lk/6sjybR934QNHSJZPTQ=
 github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc=
+github.com/coder/websocket v1.8.12 h1:5bUXkEPPIbewrnkU8LTCLVaxi4N4J8ahufH2vlo4NAo=
+github.com/coder/websocket v1.8.12/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -22,18 +24,12 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 h1:rpfIENRNNilwHwZeG5+P150SMrnN
 github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0=
 github.com/fasthttp/websocket v1.5.12 h1:e4RGPpWW2HTbL3zV0Y/t7g0ub294LkiuXXUuTOUInlE=
 github.com/fasthttp/websocket v1.5.12/go.mod h1:I+liyL7/4moHojiOgUOIKEWm9EIxHqxZChS+aMFltyg=
-github.com/fiatjaf/eventstore v0.14.4 h1:bqJQit/M5E6vwbWwgrL4kTPoWCbt1Hb9H/AH4xf9uVQ=
-github.com/fiatjaf/eventstore v0.14.4/go.mod h1:3Kkujc6A8KjpNvSKu1jNCcFjSgEEyCxaDJVgShHz0J8=
-github.com/fiatjaf/khatru v0.14.0 h1:zpWlAA87XBpDKBPIDbAuNw/HpKXzyt5XHVDbSvUbmDo=
-github.com/fiatjaf/khatru v0.14.0/go.mod h1:uxE5e8DBXPZqbHjr/gfatQas5bEJIMmsOCDcdF4LoRQ=
+github.com/fiatjaf/eventstore v0.16.0 h1:r26aJeOwJTCbEevU8RVqp9FlcAgzKKqUWFH//x+Y+7M=
+github.com/fiatjaf/eventstore v0.16.0/go.mod h1:KAsld5BhkmSck48aF11Txu8X+OGNmoabw4TlYVWqInc=
+github.com/fiatjaf/khatru v0.15.0 h1:0aLWiTrdzoKD4WmW35GWL/Jsn4dACCUw325JKZg/AmI=
+github.com/fiatjaf/khatru v0.15.0/go.mod h1:GBQJXZpitDatXF9RookRXcWB5zCJclCE4ufDK3jk80g=
 github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
 github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
-github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU=
-github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM=
-github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og=
-github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw=
-github.com/gobwas/ws v1.4.0 h1:CTaoG1tojrh4ucGPcoJFiAQUAsEWekEWvLy7GsVNqGs=
-github.com/gobwas/ws v1.4.0/go.mod h1:G3gNqMNtPppf5XUz7O4shetPpcZ1VJ7zt18dlUeakrc=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/gorilla/schema v1.4.1 h1:jUg5hUjCSDZpNGLuXQOgIWGdlgrIdYvgQ0wZtdK1M3E=
@@ -65,13 +61,13 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/nbd-wtf/go-nostr v0.45.0 h1:4WaMg0Yvda9gBcyRq9KtI32lPeFY8mbX0eFlfdnLrSE=
-github.com/nbd-wtf/go-nostr v0.45.0/go.mod h1:m0ID2gSA2Oak/uaPnM1uN22JhDRZS4UVJG2c8jo19rg=
+github.com/nbd-wtf/go-nostr v0.48.0 h1:GYu6k6wRzSxYpra4pzMRk3R8xdUW8fac+trQtt6YD0o=
+github.com/nbd-wtf/go-nostr v0.48.0/go.mod h1:O6n8bv+KktkEs+4svL7KN/OSnOWB5LzcZbuKjxnpRD0=
 github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+mJ4=
-github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
+github.com/puzpuzpuz/xsync/v3 v3.4.1 h1:wWXLKXwzpsduC3kUSahiL45MWxkGb+AQG0dsri4iftA=
+github.com/puzpuzpuz/xsync/v3 v3.4.1/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
 github.com/rs/cors v1.11.1 h1:eU3gRzXLRK57F5rKMGMZURNdIG4EoAmX8k94r9wXWHA=
 github.com/rs/cors v1.11.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
 github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
diff --git a/nostr/relay.go b/nostr/relay.go
index 5a1d70d..bdce291 100644
--- a/nostr/relay.go
+++ b/nostr/relay.go
@@ -2,7 +2,6 @@ package nostr
 
 import (
 	"context"
-	"time"
 
 	"git.devvul.com/asara/well-goknown/config"
 	"github.com/fiatjaf/eventstore/postgresql"
@@ -57,12 +56,12 @@ func NewRelay(version string) *khatru.Relay {
 	relay.QueryEvents = append(relay.QueryEvents, RelayDb.QueryEvents)
 	relay.CountEvents = append(relay.CountEvents, RelayDb.CountEvents)
 	relay.DeleteEvent = append(relay.DeleteEvent, RelayDb.DeleteEvent)
+	relay.ReplaceEvent = append(relay.ReplaceEvent, RelayDb.ReplaceEvent)
 
 	// apply policies
 	relay.RejectEvent = append(relay.RejectEvent,
 		RejectUnregisteredNpubs,
 		policies.ValidateKind,
-		policies.EventIPRateLimiter(25, time.Minute*1, 100),
 	)
 
 	relay.RejectFilter = append(relay.RejectFilter,
@@ -71,9 +70,5 @@ func NewRelay(version string) *khatru.Relay {
 		policies.NoComplexFilters,
 	)
 
-	relay.RejectConnection = append(relay.RejectConnection,
-		policies.ConnectionRateLimiter(50, time.Minute*5, 100),
-	)
-
 	return relay
 }
diff --git a/vendor/github.com/coder/websocket/LICENSE.txt b/vendor/github.com/coder/websocket/LICENSE.txt
new file mode 100644
index 0000000..77b5bef
--- /dev/null
+++ b/vendor/github.com/coder/websocket/LICENSE.txt
@@ -0,0 +1,13 @@
+Copyright (c) 2023 Anmol Sethi <hi@nhooyr.io>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/coder/websocket/README.md b/vendor/github.com/coder/websocket/README.md
new file mode 100644
index 0000000..c74b79d
--- /dev/null
+++ b/vendor/github.com/coder/websocket/README.md
@@ -0,0 +1,160 @@
+# websocket
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/coder/websocket.svg)](https://pkg.go.dev/github.com/coder/websocket)
+[![Go Coverage](https://img.shields.io/badge/coverage-91%25-success)](https://github.com/coder/websocket/coverage.html)
+
+websocket is a minimal and idiomatic WebSocket library for Go.
+
+## Install
+
+```sh
+go get github.com/coder/websocket
+```
+
+> [!NOTE]
+> Coder now maintains this project as explained in [this blog post](https://coder.com/blog/websocket).
+> We're grateful to [nhooyr](https://github.com/nhooyr) for authoring and maintaining this project from
+> 2019 to 2024.
+
+## Highlights
+
+- Minimal and idiomatic API
+- First class [context.Context](https://blog.golang.org/context) support
+- Fully passes the WebSocket [autobahn-testsuite](https://github.com/crossbario/autobahn-testsuite)
+- [Zero dependencies](https://pkg.go.dev/github.com/coder/websocket?tab=imports)
+- JSON helpers in the [wsjson](https://pkg.go.dev/github.com/coder/websocket/wsjson) subpackage
+- Zero alloc reads and writes
+- Concurrent writes
+- [Close handshake](https://pkg.go.dev/github.com/coder/websocket#Conn.Close)
+- [net.Conn](https://pkg.go.dev/github.com/coder/websocket#NetConn) wrapper
+- [Ping pong](https://pkg.go.dev/github.com/coder/websocket#Conn.Ping) API
+- [RFC 7692](https://tools.ietf.org/html/rfc7692) permessage-deflate compression
+- [CloseRead](https://pkg.go.dev/github.com/coder/websocket#Conn.CloseRead) helper for write only connections
+- Compile to [Wasm](https://pkg.go.dev/github.com/coder/websocket#hdr-Wasm)
+
+## Roadmap
+
+See GitHub issues for minor issues but the major future enhancements are:
+
+- [ ] Perfect examples [#217](https://github.com/nhooyr/websocket/issues/217)
+- [ ] wstest.Pipe for in memory testing [#340](https://github.com/nhooyr/websocket/issues/340)
+- [ ] Ping pong heartbeat helper [#267](https://github.com/nhooyr/websocket/issues/267)
+- [ ] Ping pong instrumentation callbacks [#246](https://github.com/nhooyr/websocket/issues/246)
+- [ ] Graceful shutdown helpers [#209](https://github.com/nhooyr/websocket/issues/209)
+- [ ] Assembly for WebSocket masking [#16](https://github.com/nhooyr/websocket/issues/16)
+  - WIP at [#326](https://github.com/nhooyr/websocket/pull/326), about 3x faster
+- [ ] HTTP/2 [#4](https://github.com/nhooyr/websocket/issues/4)
+- [ ] The holy grail [#402](https://github.com/nhooyr/websocket/issues/402)
+
+## Examples
+
+For a production quality example that demonstrates the complete API, see the
+[echo example](./internal/examples/echo).
+
+For a full stack example, see the [chat example](./internal/examples/chat).
+
+### Server
+
+```go
+http.HandlerFunc(func (w http.ResponseWriter, r *http.Request) {
+	c, err := websocket.Accept(w, r, nil)
+	if err != nil {
+		// ...
+	}
+	defer c.CloseNow()
+
+	ctx, cancel := context.WithTimeout(r.Context(), time.Second*10)
+	defer cancel()
+
+	var v interface{}
+	err = wsjson.Read(ctx, c, &v)
+	if err != nil {
+		// ...
+	}
+
+	log.Printf("received: %v", v)
+
+	c.Close(websocket.StatusNormalClosure, "")
+})
+```
+
+### Client
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+defer cancel()
+
+c, _, err := websocket.Dial(ctx, "ws://localhost:8080", nil)
+if err != nil {
+	// ...
+}
+defer c.CloseNow()
+
+err = wsjson.Write(ctx, c, "hi")
+if err != nil {
+	// ...
+}
+
+c.Close(websocket.StatusNormalClosure, "")
+```
+
+## Comparison
+
+### gorilla/websocket
+
+Advantages of [gorilla/websocket](https://github.com/gorilla/websocket):
+
+- Mature and widely used
+- [Prepared writes](https://pkg.go.dev/github.com/gorilla/websocket#PreparedMessage)
+- Configurable [buffer sizes](https://pkg.go.dev/github.com/gorilla/websocket#hdr-Buffers)
+- No extra goroutine per connection to support cancellation with context.Context. This costs github.com/coder/websocket 2 KB of memory per connection.
+  - Will be removed soon with [context.AfterFunc](https://github.com/golang/go/issues/57928). See [#411](https://github.com/nhooyr/websocket/issues/411)
+
+Advantages of github.com/coder/websocket:
+
+- Minimal and idiomatic API
+  - Compare godoc of [github.com/coder/websocket](https://pkg.go.dev/github.com/coder/websocket) with [gorilla/websocket](https://pkg.go.dev/github.com/gorilla/websocket) side by side.
+- [net.Conn](https://pkg.go.dev/github.com/coder/websocket#NetConn) wrapper
+- Zero alloc reads and writes ([gorilla/websocket#535](https://github.com/gorilla/websocket/issues/535))
+- Full [context.Context](https://blog.golang.org/context) support
+- Dial uses [net/http.Client](https://golang.org/pkg/net/http/#Client)
+  - Will enable easy HTTP/2 support in the future
+  - Gorilla writes directly to a net.Conn and so duplicates features of net/http.Client.
+- Concurrent writes
+- Close handshake ([gorilla/websocket#448](https://github.com/gorilla/websocket/issues/448))
+- Idiomatic [ping pong](https://pkg.go.dev/github.com/coder/websocket#Conn.Ping) API
+  - Gorilla requires registering a pong callback before sending a Ping
+- Can target Wasm ([gorilla/websocket#432](https://github.com/gorilla/websocket/issues/432))
+- Transparent message buffer reuse with [wsjson](https://pkg.go.dev/github.com/coder/websocket/wsjson) subpackage
+- [1.75x](https://github.com/nhooyr/websocket/releases/tag/v1.7.4) faster WebSocket masking implementation in pure Go
+  - Gorilla's implementation is slower and uses [unsafe](https://golang.org/pkg/unsafe/).
+    Soon we'll have assembly and be 3x faster [#326](https://github.com/nhooyr/websocket/pull/326)
+- Full [permessage-deflate](https://tools.ietf.org/html/rfc7692) compression extension support
+  - Gorilla only supports no context takeover mode
+- [CloseRead](https://pkg.go.dev/github.com/coder/websocket#Conn.CloseRead) helper for write only connections ([gorilla/websocket#492](https://github.com/gorilla/websocket/issues/492))
+
+#### golang.org/x/net/websocket
+
+[golang.org/x/net/websocket](https://pkg.go.dev/golang.org/x/net/websocket) is deprecated.
+See [golang/go/issues/18152](https://github.com/golang/go/issues/18152).
+
+The [net.Conn](https://pkg.go.dev/github.com/coder/websocket#NetConn) can help in transitioning
+to github.com/coder/websocket.
+
+#### gobwas/ws
+
+[gobwas/ws](https://github.com/gobwas/ws) has an extremely flexible API that allows it to be used
+in an event driven style for performance. See the author's [blog post](https://medium.freecodecamp.org/million-websockets-and-go-cc58418460bb).
+
+However it is quite bloated. See https://pkg.go.dev/github.com/gobwas/ws
+
+When writing idiomatic Go, github.com/coder/websocket will be faster and easier to use.
+
+#### lesismal/nbio
+
+[lesismal/nbio](https://github.com/lesismal/nbio) is similar to gobwas/ws in that the API is
+event driven for performance reasons.
+
+However it is quite bloated. See https://pkg.go.dev/github.com/lesismal/nbio
+
+When writing idiomatic Go, github.com/coder/websocket will be faster and easier to use.
diff --git a/vendor/github.com/coder/websocket/accept.go b/vendor/github.com/coder/websocket/accept.go
new file mode 100644
index 0000000..f672a73
--- /dev/null
+++ b/vendor/github.com/coder/websocket/accept.go
@@ -0,0 +1,352 @@
+//go:build !js
+// +build !js
+
+package websocket
+
+import (
+	"bytes"
+	"crypto/sha1"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"net/textproto"
+	"net/url"
+	"path/filepath"
+	"strings"
+
+	"github.com/coder/websocket/internal/errd"
+)
+
+// AcceptOptions represents Accept's options.
+type AcceptOptions struct {
+	// Subprotocols lists the WebSocket subprotocols that Accept will negotiate with the client.
+	// The empty subprotocol will always be negotiated as per RFC 6455. If you would like to
+	// reject it, close the connection when c.Subprotocol() == "".
+	Subprotocols []string
+
+	// InsecureSkipVerify is used to disable Accept's origin verification behaviour.
+	//
+	// You probably want to use OriginPatterns instead.
+	InsecureSkipVerify bool
+
+	// OriginPatterns lists the host patterns for authorized origins.
+	// The request host is always authorized.
+	// Use this to enable cross origin WebSockets.
+	//
+	// i.e javascript running on example.com wants to access a WebSocket server at chat.example.com.
+	// In such a case, example.com is the origin and chat.example.com is the request host.
+	// One would set this field to []string{"example.com"} to authorize example.com to connect.
+	//
+	// Each pattern is matched case insensitively against the request origin host
+	// with filepath.Match.
+	// See https://golang.org/pkg/path/filepath/#Match
+	//
+	// Please ensure you understand the ramifications of enabling this.
+	// If used incorrectly your WebSocket server will be open to CSRF attacks.
+	//
+	// Do not use * as a pattern to allow any origin, prefer to use InsecureSkipVerify instead
+	// to bring attention to the danger of such a setting.
+	OriginPatterns []string
+
+	// CompressionMode controls the compression mode.
+	// Defaults to CompressionDisabled.
+	//
+	// See docs on CompressionMode for details.
+	CompressionMode CompressionMode
+
+	// CompressionThreshold controls the minimum size of a message before compression is applied.
+	//
+	// Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes
+	// for CompressionContextTakeover.
+	CompressionThreshold int
+}
+
+// cloneWithDefaults returns a shallow copy of opts so Accept can mutate the
+// options without affecting the caller's struct. A nil receiver yields the
+// zero-value options.
+func (opts *AcceptOptions) cloneWithDefaults() *AcceptOptions {
+	var o AcceptOptions
+	if opts != nil {
+		o = *opts
+	}
+	return &o
+}
+
+// Accept accepts a WebSocket handshake from a client and upgrades the
+// the connection to a WebSocket.
+//
+// Accept will not allow cross origin requests by default.
+// See the InsecureSkipVerify and OriginPatterns options to allow cross origin requests.
+//
+// Accept will write a response to w on all errors.
+func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) {
+	return accept(w, r, opts)
+}
+
+// accept implements Accept: it validates the opening handshake, optionally
+// verifies the Origin, negotiates the subprotocol and permessage-deflate
+// extension, hijacks the underlying TCP connection and wraps it in a *Conn.
+func accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (_ *Conn, err error) {
+	defer errd.Wrap(&err, "failed to accept WebSocket connection")
+
+	errCode, err := verifyClientRequest(w, r)
+	if err != nil {
+		http.Error(w, err.Error(), errCode)
+		return nil, err
+	}
+
+	opts = opts.cloneWithDefaults()
+	if !opts.InsecureSkipVerify {
+		err = authenticateOrigin(r, opts.OriginPatterns)
+		if err != nil {
+			if errors.Is(err, filepath.ErrBadPattern) {
+				// A bad pattern is a server configuration bug; log it but do
+				// not echo the pattern back to the client.
+				log.Printf("websocket: %v", err)
+				err = errors.New(http.StatusText(http.StatusForbidden))
+			}
+			http.Error(w, err.Error(), http.StatusForbidden)
+			return nil, err
+		}
+	}
+
+	// Hijacking is required to take over the raw TCP connection from net/http.
+	hj, ok := w.(http.Hijacker)
+	if !ok {
+		err = errors.New("http.ResponseWriter does not implement http.Hijacker")
+		http.Error(w, http.StatusText(http.StatusNotImplemented), http.StatusNotImplemented)
+		return nil, err
+	}
+
+	w.Header().Set("Upgrade", "websocket")
+	w.Header().Set("Connection", "Upgrade")
+
+	key := r.Header.Get("Sec-WebSocket-Key")
+	w.Header().Set("Sec-WebSocket-Accept", secWebSocketAccept(key))
+
+	subproto := selectSubprotocol(r, opts.Subprotocols)
+	if subproto != "" {
+		w.Header().Set("Sec-WebSocket-Protocol", subproto)
+	}
+
+	copts, ok := selectDeflate(websocketExtensions(r.Header), opts.CompressionMode)
+	if ok {
+		w.Header().Set("Sec-WebSocket-Extensions", copts.String())
+	}
+
+	w.WriteHeader(http.StatusSwitchingProtocols)
+	// See https://github.com/nhooyr/websocket/issues/166
+	if ginWriter, ok := w.(interface {
+		WriteHeaderNow()
+	}); ok {
+		ginWriter.WriteHeaderNow()
+	}
+
+	netConn, brw, err := hj.Hijack()
+	if err != nil {
+		err = fmt.Errorf("failed to hijack connection: %w", err)
+		http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
+		return nil, err
+	}
+
+	// Replay any bytes already buffered by the server's bufio.Reader ahead of
+	// the raw connection so no client data is lost across the hijack.
+	// https://github.com/golang/go/issues/32314
+	b, _ := brw.Reader.Peek(brw.Reader.Buffered())
+	brw.Reader.Reset(io.MultiReader(bytes.NewReader(b), netConn))
+
+	return newConn(connConfig{
+		subprotocol:    w.Header().Get("Sec-WebSocket-Protocol"),
+		rwc:            netConn,
+		client:         false,
+		copts:          copts,
+		flateThreshold: opts.CompressionThreshold,
+
+		br: brw.Reader,
+		bw: brw.Writer,
+	}), nil
+}
+
+// verifyClientRequest enforces the RFC 6455 opening-handshake requirements
+// (section 4.1). On failure it returns the HTTP status code to respond with
+// and an error describing the violation; some branches also set advisory
+// response headers (e.g. the supported Sec-WebSocket-Version).
+func verifyClientRequest(w http.ResponseWriter, r *http.Request) (errCode int, _ error) {
+	if !r.ProtoAtLeast(1, 1) {
+		return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: handshake request must be at least HTTP/1.1: %q", r.Proto)
+	}
+
+	if !headerContainsTokenIgnoreCase(r.Header, "Connection", "Upgrade") {
+		w.Header().Set("Connection", "Upgrade")
+		w.Header().Set("Upgrade", "websocket")
+		return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", r.Header.Get("Connection"))
+	}
+
+	if !headerContainsTokenIgnoreCase(r.Header, "Upgrade", "websocket") {
+		w.Header().Set("Connection", "Upgrade")
+		w.Header().Set("Upgrade", "websocket")
+		return http.StatusUpgradeRequired, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", r.Header.Get("Upgrade"))
+	}
+
+	if r.Method != "GET" {
+		return http.StatusMethodNotAllowed, fmt.Errorf("WebSocket protocol violation: handshake request method is not GET but %q", r.Method)
+	}
+
+	if r.Header.Get("Sec-WebSocket-Version") != "13" {
+		w.Header().Set("Sec-WebSocket-Version", "13")
+		return http.StatusBadRequest, fmt.Errorf("unsupported WebSocket protocol version (only 13 is supported): %q", r.Header.Get("Sec-WebSocket-Version"))
+	}
+
+	websocketSecKeys := r.Header.Values("Sec-WebSocket-Key")
+	if len(websocketSecKeys) == 0 {
+		return http.StatusBadRequest, errors.New("WebSocket protocol violation: missing Sec-WebSocket-Key")
+	}
+
+	if len(websocketSecKeys) > 1 {
+		return http.StatusBadRequest, errors.New("WebSocket protocol violation: multiple Sec-WebSocket-Key headers")
+	}
+
+	// The RFC states to remove any leading or trailing whitespace.
+	websocketSecKey := strings.TrimSpace(websocketSecKeys[0])
+	if v, err := base64.StdEncoding.DecodeString(websocketSecKey); err != nil || len(v) != 16 {
+		return http.StatusBadRequest, fmt.Errorf("WebSocket protocol violation: invalid Sec-WebSocket-Key %q, must be a 16 byte base64 encoded string", websocketSecKey)
+	}
+
+	return 0, nil
+}
+
+// authenticateOrigin verifies the request's Origin header against the request
+// host and the configured origin patterns. A missing Origin header is allowed
+// (non-browser clients typically omit it); a same-host origin is always
+// authorized.
+func authenticateOrigin(r *http.Request, originHosts []string) error {
+	origin := r.Header.Get("Origin")
+	if origin == "" {
+		return nil
+	}
+
+	u, err := url.Parse(origin)
+	if err != nil {
+		return fmt.Errorf("failed to parse Origin header %q: %w", origin, err)
+	}
+
+	if strings.EqualFold(r.Host, u.Host) {
+		return nil
+	}
+
+	for _, hostPattern := range originHosts {
+		matched, err := match(hostPattern, u.Host)
+		if err != nil {
+			return fmt.Errorf("failed to parse filepath pattern %q: %w", hostPattern, err)
+		}
+		if matched {
+			return nil
+		}
+	}
+	// An Origin that parsed but yielded no host is not a usable origin URL.
+	if u.Host == "" {
+		return fmt.Errorf("request Origin %q is not a valid URL with a host", origin)
+	}
+	return fmt.Errorf("request Origin %q is not authorized for Host %q", u.Host, r.Host)
+}
+
+// match is a case-insensitive filepath.Match.
+func match(pattern, s string) (bool, error) {
+	return filepath.Match(strings.ToLower(pattern), strings.ToLower(s))
+}
+
+// selectSubprotocol returns the first of the server's subprotocols that the
+// client also offered (server preference order wins), preserving the client's
+// spelling. It returns "" when there is no overlap.
+func selectSubprotocol(r *http.Request, subprotocols []string) string {
+	cps := headerTokens(r.Header, "Sec-WebSocket-Protocol")
+	for _, sp := range subprotocols {
+		for _, cp := range cps {
+			if strings.EqualFold(sp, cp) {
+				return cp
+			}
+		}
+	}
+	return ""
+}
+
+// selectDeflate picks the first acceptable permessage-deflate offer from the
+// client's Sec-WebSocket-Extensions header, or reports false when compression
+// is disabled or no offer is acceptable.
+func selectDeflate(extensions []websocketExtension, mode CompressionMode) (*compressionOptions, bool) {
+	if mode == CompressionDisabled {
+		return nil, false
+	}
+	for _, ext := range extensions {
+		switch ext.name {
+		// We used to implement x-webkit-deflate-frame too for Safari but Safari has bugs...
+		// See https://github.com/nhooyr/websocket/issues/218
+		case "permessage-deflate":
+			copts, ok := acceptDeflate(ext, mode)
+			if ok {
+				return copts, true
+			}
+		}
+	}
+	return nil, false
+}
+
+// acceptDeflate validates the parameters of a single permessage-deflate offer.
+// Any parameter other than the recognized takeover/window-bits forms rejects
+// the whole offer (returns false) so a conforming client can retry another.
+func acceptDeflate(ext websocketExtension, mode CompressionMode) (*compressionOptions, bool) {
+	copts := mode.opts()
+	for _, p := range ext.params {
+		switch p {
+		case "client_no_context_takeover":
+			copts.clientNoContextTakeover = true
+			continue
+		case "server_no_context_takeover":
+			copts.serverNoContextTakeover = true
+			continue
+		case "client_max_window_bits",
+			"server_max_window_bits=15":
+			continue
+		}
+
+		if strings.HasPrefix(p, "client_max_window_bits=") {
+			// We can't adjust the deflate window, but decoding with a larger window is acceptable.
+			continue
+		}
+		return nil, false
+	}
+	return copts, true
+}
+
+// headerContainsTokenIgnoreCase reports whether any comma-separated token of
+// header key equals token, case-insensitively.
+func headerContainsTokenIgnoreCase(h http.Header, key, token string) bool {
+	for _, t := range headerTokens(h, key) {
+		if strings.EqualFold(t, token) {
+			return true
+		}
+	}
+	return false
+}
+
+// websocketExtension is one parsed entry of Sec-WebSocket-Extensions:
+// an extension name followed by its semicolon-separated parameters.
+type websocketExtension struct {
+	name   string
+	params []string
+}
+
+// websocketExtensions parses every Sec-WebSocket-Extensions token into its
+// name and parameter list. Empty tokens are skipped.
+func websocketExtensions(h http.Header) []websocketExtension {
+	var exts []websocketExtension
+	extStrs := headerTokens(h, "Sec-WebSocket-Extensions")
+	for _, extStr := range extStrs {
+		if extStr == "" {
+			continue
+		}
+
+		vals := strings.Split(extStr, ";")
+		for i := range vals {
+			vals[i] = strings.TrimSpace(vals[i])
+		}
+
+		e := websocketExtension{
+			name:   vals[0],
+			params: vals[1:],
+		}
+
+		exts = append(exts, e)
+	}
+	return exts
+}
+
+// headerTokens splits every value of header key on commas and trims each
+// token, flattening multiple header lines into one slice.
+func headerTokens(h http.Header, key string) []string {
+	key = textproto.CanonicalMIMEHeaderKey(key)
+	var tokens []string
+	for _, v := range h[key] {
+		v = strings.TrimSpace(v)
+		for _, t := range strings.Split(v, ",") {
+			t = strings.TrimSpace(t)
+			tokens = append(tokens, t)
+		}
+	}
+	return tokens
+}
+
+// keyGUID is the fixed GUID from RFC 6455 section 1.3 used to derive
+// Sec-WebSocket-Accept from Sec-WebSocket-Key.
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+// secWebSocketAccept computes the RFC 6455 accept token:
+// base64(SHA-1(key + GUID)). SHA-1 here is protocol-mandated handshake
+// plumbing, not a security boundary.
+func secWebSocketAccept(secWebSocketKey string) string {
+	h := sha1.New()
+	h.Write([]byte(secWebSocketKey))
+	h.Write(keyGUID)
+
+	return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
diff --git a/vendor/github.com/coder/websocket/close.go b/vendor/github.com/coder/websocket/close.go
new file mode 100644
index 0000000..ff2e878
--- /dev/null
+++ b/vendor/github.com/coder/websocket/close.go
@@ -0,0 +1,348 @@
+//go:build !js
+// +build !js
+
+package websocket
+
+import (
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/coder/websocket/internal/errd"
+)
+
+// StatusCode represents a WebSocket status code.
+// https://tools.ietf.org/html/rfc6455#section-7.4
+type StatusCode int
+
+// https://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+//
+// These are only the status codes defined by the protocol.
+//
+// You can define custom codes in the 3000-4999 range.
+// The 3000-3999 range is reserved for use by libraries, frameworks and applications.
+// The 4000-4999 range is reserved for private use.
+const (
+	StatusNormalClosure   StatusCode = 1000
+	StatusGoingAway       StatusCode = 1001
+	StatusProtocolError   StatusCode = 1002
+	StatusUnsupportedData StatusCode = 1003
+
+	// 1004 is reserved and so unexported.
+	statusReserved StatusCode = 1004
+
+	// StatusNoStatusRcvd cannot be sent in a close message.
+	// It is reserved for when a close message is received without
+	// a status code.
+	StatusNoStatusRcvd StatusCode = 1005
+
+	// StatusAbnormalClosure is exported for use only with Wasm.
+	// In non Wasm Go, the returned error will indicate whether the
+	// connection was closed abnormally.
+	StatusAbnormalClosure StatusCode = 1006
+
+	StatusInvalidFramePayloadData StatusCode = 1007
+	StatusPolicyViolation         StatusCode = 1008
+	StatusMessageTooBig           StatusCode = 1009
+	StatusMandatoryExtension      StatusCode = 1010
+	StatusInternalError           StatusCode = 1011
+	StatusServiceRestart          StatusCode = 1012
+	StatusTryAgainLater           StatusCode = 1013
+	StatusBadGateway              StatusCode = 1014
+
+	// StatusTLSHandshake is only exported for use with Wasm.
+	// In non Wasm Go, the returned error will indicate whether there was
+	// a TLS handshake failure.
+	StatusTLSHandshake StatusCode = 1015
+)
+
+// CloseError is returned when the connection is closed with a status and reason.
+//
+// Use Go 1.13's errors.As to check for this error.
+// Also see the CloseStatus helper.
+type CloseError struct {
+	Code   StatusCode
+	Reason string
+}
+
+func (ce CloseError) Error() string {
+	return fmt.Sprintf("status = %v and reason = %q", ce.Code, ce.Reason)
+}
+
+// CloseStatus is a convenience wrapper around Go 1.13's errors.As to grab
+// the status code from a CloseError.
+//
+// -1 will be returned if the passed error is nil or not a CloseError.
+func CloseStatus(err error) StatusCode {
+	var ce CloseError
+	if errors.As(err, &ce) {
+		return ce.Code
+	}
+	return -1
+}
+
+// Close performs the WebSocket close handshake with the given status code and reason.
+//
+// It will write a WebSocket close frame with a timeout of 5s and then wait 5s for
+// the peer to send a close frame.
+// All data messages received from the peer during the close handshake will be discarded.
+//
+// The connection can only be closed once. Additional calls to Close
+// are no-ops.
+//
+// The maximum length of reason must be 125 bytes. Avoid sending a dynamic reason.
+//
+// Close will unblock all goroutines interacting with the connection once
+// complete.
+func (c *Conn) Close(code StatusCode, reason string) (err error) {
+	defer errd.Wrap(&err, "failed to close WebSocket")
+
+	if !c.casClosing() {
+		err = c.waitGoroutines()
+		if err != nil {
+			return err
+		}
+		return net.ErrClosed
+	}
+	defer func() {
+		if errors.Is(err, net.ErrClosed) {
+			err = nil
+		}
+	}()
+
+	err = c.closeHandshake(code, reason)
+
+	err2 := c.close()
+	if err == nil && err2 != nil {
+		err = err2
+	}
+
+	err2 = c.waitGoroutines()
+	if err == nil && err2 != nil {
+		err = err2
+	}
+
+	return err
+}
+
+// CloseNow closes the WebSocket connection without attempting a close handshake.
+// Use when you do not want the overhead of the close handshake.
+func (c *Conn) CloseNow() (err error) {
+	defer errd.Wrap(&err, "failed to immediately close WebSocket")
+
+	if !c.casClosing() {
+		err = c.waitGoroutines()
+		if err != nil {
+			return err
+		}
+		return net.ErrClosed
+	}
+	defer func() {
+		if errors.Is(err, net.ErrClosed) {
+			err = nil
+		}
+	}()
+
+	err = c.close()
+
+	err2 := c.waitGoroutines()
+	if err == nil && err2 != nil {
+		err = err2
+	}
+	return err
+}
+
+// closeHandshake writes our close frame and then waits for the peer's. If
+// the peer echoes the same status code we sent, the handshake is considered
+// clean and the resulting CloseError is suppressed.
+func (c *Conn) closeHandshake(code StatusCode, reason string) error {
+	err := c.writeClose(code, reason)
+	if err != nil {
+		return err
+	}
+
+	err = c.waitCloseHandshake()
+	if CloseStatus(err) != code {
+		return err
+	}
+	return nil
+}
+
+// writeClose marshals and sends a close control frame with a 5s timeout.
+// StatusNoStatusRcvd is sent as an empty payload since that code cannot
+// appear on the wire per RFC 6455.
+func (c *Conn) writeClose(code StatusCode, reason string) error {
+	ce := CloseError{
+		Code:   code,
+		Reason: reason,
+	}
+
+	var p []byte
+	var err error
+	if ce.Code != StatusNoStatusRcvd {
+		p, err = ce.bytes()
+		if err != nil {
+			return err
+		}
+	}
+
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	defer cancel()
+
+	err = c.writeControl(ctx, opClose, p)
+	// If the connection closed as we're writing we ignore the error as we might
+	// have written the close frame, the peer responded and then someone else read it
+	// and closed the connection.
+	if err != nil && !errors.Is(err, net.ErrClosed) {
+		return err
+	}
+	return nil
+}
+
+// waitCloseHandshake drains incoming data for up to 5s until the connection
+// terminates. It first discards the unread remainder of the in-progress
+// message, then reads and discards whole frames in a loop. The loop only
+// exits via an error from readLoop — presumably when the peer's close frame
+// is received (surfaced as a CloseError) or the 5s context expires; TODO
+// confirm against readLoop's implementation, which is outside this file view.
+func (c *Conn) waitCloseHandshake() error {
+	ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+	defer cancel()
+
+	err := c.readMu.lock(ctx)
+	if err != nil {
+		return err
+	}
+	defer c.readMu.unlock()
+
+	// Discard whatever is left of the message currently being read.
+	for i := int64(0); i < c.msgReader.payloadLength; i++ {
+		_, err := c.br.ReadByte()
+		if err != nil {
+			return err
+		}
+	}
+
+	for {
+		h, err := c.readLoop(ctx)
+		if err != nil {
+			return err
+		}
+
+		// Discard the payload of each subsequent frame.
+		for i := int64(0); i < h.payloadLength; i++ {
+			_, err := c.br.ReadByte()
+			if err != nil {
+				return err
+			}
+		}
+	}
+}
+
+// waitGoroutines waits for the connection's background goroutines
+// (timeoutLoop and, if CloseRead was used, the close-read goroutine) to exit
+// and for the connection to be fully closed. A single 15s timer bounds all
+// three waits collectively, not individually.
+func (c *Conn) waitGoroutines() error {
+	t := time.NewTimer(time.Second * 15)
+	defer t.Stop()
+
+	select {
+	case <-c.timeoutLoopDone:
+	case <-t.C:
+		return errors.New("failed to wait for timeoutLoop goroutine to exit")
+	}
+
+	// closeReadCtx is only non-nil when CloseRead/CloseReadCtx was called.
+	c.closeReadMu.Lock()
+	closeRead := c.closeReadCtx != nil
+	c.closeReadMu.Unlock()
+	if closeRead {
+		select {
+		case <-c.closeReadDone:
+		case <-t.C:
+			return errors.New("failed to wait for close read goroutine to exit")
+		}
+	}
+
+	select {
+	case <-c.closed:
+	case <-t.C:
+		return errors.New("failed to wait for connection to be closed")
+	}
+
+	return nil
+}
+
+// parseClosePayload decodes a close-frame payload: an empty payload means
+// StatusNoStatusRcvd; otherwise the first 2 bytes are a big-endian status
+// code and the rest is the UTF-8 reason.
+func parseClosePayload(p []byte) (CloseError, error) {
+	if len(p) == 0 {
+		return CloseError{
+			Code: StatusNoStatusRcvd,
+		}, nil
+	}
+
+	if len(p) < 2 {
+		return CloseError{}, fmt.Errorf("close payload %q too small, cannot even contain the 2 byte status code", p)
+	}
+
+	ce := CloseError{
+		Code:   StatusCode(binary.BigEndian.Uint16(p)),
+		Reason: string(p[2:]),
+	}
+
+	if !validWireCloseCode(ce.Code) {
+		return CloseError{}, fmt.Errorf("invalid status code %v", ce.Code)
+	}
+
+	return ce, nil
+}
+
+// validWireCloseCode reports whether code may legally appear in a close frame
+// on the wire: the defined 1000-1014 range minus the reserved codes, plus the
+// application/private 3000-4999 range.
+// See http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+// and https://tools.ietf.org/html/rfc6455#section-7.4.1
+func validWireCloseCode(code StatusCode) bool {
+	switch code {
+	case statusReserved, StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake:
+		return false
+	}
+
+	if code >= StatusNormalClosure && code <= StatusBadGateway {
+		return true
+	}
+	if code >= 3000 && code <= 4999 {
+		return true
+	}
+
+	return false
+}
+
+// bytes marshals ce into a close-frame payload. If marshaling fails (reason
+// too long or an unsendable status code), it falls back to a bare
+// StatusInternalError payload while still returning the original error.
+func (ce CloseError) bytes() ([]byte, error) {
+	p, err := ce.bytesErr()
+	if err != nil {
+		err = fmt.Errorf("failed to marshal close frame: %w", err)
+		ce = CloseError{
+			Code: StatusInternalError,
+		}
+		p, _ = ce.bytesErr()
+	}
+	return p, err
+}
+
+// maxCloseReason is the longest allowed close reason: the control-frame
+// payload limit minus the 2 byte status code.
+const maxCloseReason = maxControlPayload - 2
+
+// bytesErr encodes the status code as a big-endian uint16 followed by the
+// reason bytes, validating both first.
+func (ce CloseError) bytesErr() ([]byte, error) {
+	if len(ce.Reason) > maxCloseReason {
+		return nil, fmt.Errorf("reason string max is %v but got %q with length %v", maxCloseReason, ce.Reason, len(ce.Reason))
+	}
+
+	if !validWireCloseCode(ce.Code) {
+		return nil, fmt.Errorf("status code %v cannot be set", ce.Code)
+	}
+
+	buf := make([]byte, 2+len(ce.Reason))
+	binary.BigEndian.PutUint16(buf, uint16(ce.Code))
+	copy(buf[2:], ce.Reason)
+	return buf, nil
+}
+
+// casClosing transitions the connection into the closing state under closeMu,
+// returning true only for the first caller. Later Close/CloseNow calls see
+// false and become wait-then-return no-ops.
+func (c *Conn) casClosing() bool {
+	c.closeMu.Lock()
+	defer c.closeMu.Unlock()
+	if !c.closing {
+		c.closing = true
+		return true
+	}
+	return false
+}
+
+// isClosed reports whether close() has completed (the closed channel is
+// closed) without blocking.
+func (c *Conn) isClosed() bool {
+	select {
+	case <-c.closed:
+		return true
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/coder/websocket/compress.go b/vendor/github.com/coder/websocket/compress.go
new file mode 100644
index 0000000..1f3adcf
--- /dev/null
+++ b/vendor/github.com/coder/websocket/compress.go
@@ -0,0 +1,233 @@
+//go:build !js
+// +build !js
+
+package websocket
+
+import (
+	"compress/flate"
+	"io"
+	"sync"
+)
+
+// CompressionMode represents the modes available to the permessage-deflate extension.
+// See https://tools.ietf.org/html/rfc7692
+//
+// Works in all modern browsers except Safari which does not implement the permessage-deflate extension.
+//
+// Compression is only used if the peer supports the mode selected.
+type CompressionMode int
+
+const (
+	// CompressionDisabled disables the negotiation of the permessage-deflate extension.
+	//
+	// This is the default. Do not enable compression without benchmarking for your particular use case first.
+	CompressionDisabled CompressionMode = iota
+
+	// CompressionContextTakeover compresses each message greater than 128 bytes reusing the 32 KB sliding window from
+	// previous messages. i.e compression context across messages is preserved.
+	//
+	// As most WebSocket protocols are text based and repetitive, this compression mode can be very efficient.
+	//
+	// The memory overhead is a fixed 32 KB sliding window, a fixed 1.2 MB flate.Writer and a sync.Pool of 40 KB flate.Reader's
+	// that are used when reading and then returned.
+	//
+	// Thus, it uses more memory than CompressionNoContextTakeover but compresses more efficiently.
+	//
+	// If the peer does not support CompressionContextTakeover then we will fall back to CompressionNoContextTakeover.
+	CompressionContextTakeover
+
+	// CompressionNoContextTakeover compresses each message greater than 512 bytes. Each message is compressed with
+	// a new 1.2 MB flate.Writer pulled from a sync.Pool. Each message is read with a 40 KB flate.Reader pulled from
+	// a sync.Pool.
+	//
+	// This means less efficient compression as the sliding window from previous messages will not be used but the
+	// memory overhead will be lower as there will be no fixed cost for the flate.Writer nor the 32 KB sliding window.
+	// Especially if the connections are long lived and seldom written to.
+	//
+	// Thus, it uses less memory than CompressionContextTakeover but compresses less efficiently.
+	//
+	// If the peer does not support CompressionNoContextTakeover then we will fall back to CompressionDisabled.
+	CompressionNoContextTakeover
+)
+
+func (m CompressionMode) opts() *compressionOptions {
+	return &compressionOptions{
+		clientNoContextTakeover: m == CompressionNoContextTakeover,
+		serverNoContextTakeover: m == CompressionNoContextTakeover,
+	}
+}
+
+type compressionOptions struct {
+	clientNoContextTakeover bool
+	serverNoContextTakeover bool
+}
+
+func (copts *compressionOptions) String() string {
+	s := "permessage-deflate"
+	if copts.clientNoContextTakeover {
+		s += "; client_no_context_takeover"
+	}
+	if copts.serverNoContextTakeover {
+		s += "; server_no_context_takeover"
+	}
+	return s
+}
+
+// These bytes are required to get flate.Reader to return.
+// They are removed when sending to avoid the overhead as
+// WebSocket framing tells when the message has ended but then
+// we need to add them back otherwise flate.Reader keeps
+// trying to read more bytes.
+const deflateMessageTail = "\x00\x00\xff\xff"
+
+// trimLastFourBytesWriter wraps w and withholds the last 4 bytes written so
+// far, so the deflate stream's trailing 0x00 0x00 0xff 0xff tail can be
+// stripped from outgoing messages.
+type trimLastFourBytesWriter struct {
+	w    io.Writer
+	tail []byte
+}
+
+// reset discards the withheld tail so the writer can start a new message.
+func (tw *trimLastFourBytesWriter) reset() {
+	if tw != nil && tw.tail != nil {
+		tw.tail = tw.tail[:0]
+	}
+}
+
+// Write forwards all but the final 4 bytes seen so far to the underlying
+// writer, always keeping the most recent 4 bytes buffered in tw.tail. It
+// reports len(p) consumed since the retained bytes are held, not dropped.
+func (tw *trimLastFourBytesWriter) Write(p []byte) (int, error) {
+	if tw.tail == nil {
+		tw.tail = make([]byte, 0, 4)
+	}
+
+	// extra is how many buffered+incoming bytes exceed the 4 we must retain.
+	extra := len(tw.tail) + len(p) - 4
+
+	if extra <= 0 {
+		tw.tail = append(tw.tail, p...)
+		return len(p), nil
+	}
+
+	// Now we need to write as many extra bytes as we can from the previous tail.
+	if extra > len(tw.tail) {
+		extra = len(tw.tail)
+	}
+	if extra > 0 {
+		_, err := tw.w.Write(tw.tail[:extra])
+		if err != nil {
+			return 0, err
+		}
+
+		// Shift remaining bytes in tail over.
+		n := copy(tw.tail, tw.tail[extra:])
+		tw.tail = tw.tail[:n]
+	}
+
+	// If p is less than or equal to 4 bytes,
+	// all of it is is part of the tail.
+	if len(p) <= 4 {
+		tw.tail = append(tw.tail, p...)
+		return len(p), nil
+	}
+
+	// Otherwise, only the last 4 bytes are.
+	tw.tail = append(tw.tail, p[len(p)-4:]...)
+
+	p = p[:len(p)-4]
+	n, err := tw.w.Write(p)
+	return n + 4, err
+}
+
+// flateReaderPool caches flate readers (~40 KB each) across messages.
+var flateReaderPool sync.Pool
+
+// getFlateReader returns a pooled flate reader reset to read from r with the
+// given preset dictionary, allocating a fresh one when the pool is empty.
+func getFlateReader(r io.Reader, dict []byte) io.Reader {
+	fr, ok := flateReaderPool.Get().(io.Reader)
+	if !ok {
+		return flate.NewReaderDict(r, dict)
+	}
+	fr.(flate.Resetter).Reset(r, dict)
+	return fr
+}
+
+// putFlateReader returns fr to the pool for reuse.
+func putFlateReader(fr io.Reader) {
+	flateReaderPool.Put(fr)
+}
+
+// flateWriterPool caches flate writers (~1.2 MB each) across messages.
+var flateWriterPool sync.Pool
+
+// getFlateWriter returns a pooled flate writer reset to write to w,
+// allocating a fresh BestSpeed writer when the pool is empty.
+func getFlateWriter(w io.Writer) *flate.Writer {
+	fw, ok := flateWriterPool.Get().(*flate.Writer)
+	if !ok {
+		// flate.NewWriter only errors on an invalid level; BestSpeed is valid.
+		fw, _ = flate.NewWriter(w, flate.BestSpeed)
+		return fw
+	}
+	fw.Reset(w)
+	return fw
+}
+
+// putFlateWriter returns w to the pool for reuse.
+func putFlateWriter(w *flate.Writer) {
+	flateWriterPool.Put(w)
+}
+
+// slidingWindow keeps the most recent cap(buf) bytes written through it,
+// serving as the deflate dictionary for context-takeover compression.
+type slidingWindow struct {
+	buf []byte
+}
+
+// swPool caches slidingWindow values per window size so different window
+// sizes do not share buffers.
+var swPoolMu sync.RWMutex
+var swPool = map[int]*sync.Pool{}
+
+// slidingWindowPool returns the pool for window size n, creating it on first
+// use. Concurrent first-time callers may briefly create and overwrite
+// distinct pools for the same n; that is harmless since pools are only a
+// cache.
+func slidingWindowPool(n int) *sync.Pool {
+	swPoolMu.RLock()
+	p, ok := swPool[n]
+	swPoolMu.RUnlock()
+	if ok {
+		return p
+	}
+
+	p = &sync.Pool{}
+
+	swPoolMu.Lock()
+	swPool[n] = p
+	swPoolMu.Unlock()
+
+	return p
+}
+
+// init allocates (or reuses from the pool) a window of capacity n; n == 0
+// selects the 32 KB default deflate window. Calling init on an already
+// initialized window is a no-op.
+func (sw *slidingWindow) init(n int) {
+	if sw.buf != nil {
+		return
+	}
+
+	if n == 0 {
+		n = 32768
+	}
+
+	p := slidingWindowPool(n)
+	sw2, ok := p.Get().(*slidingWindow)
+	if ok {
+		*sw = *sw2
+	} else {
+		sw.buf = make([]byte, 0, n)
+	}
+}
+
+// close truncates the window (capacity is preserved) and returns it to the
+// pool keyed by that capacity.
+func (sw *slidingWindow) close() {
+	sw.buf = sw.buf[:0]
+	swPoolMu.Lock()
+	swPool[cap(sw.buf)].Put(sw)
+	swPoolMu.Unlock()
+}
+
+// write appends p, discarding the oldest bytes as needed so that only the
+// final cap(buf) bytes are ever retained.
+func (sw *slidingWindow) write(p []byte) {
+	// p alone fills the window: keep just its last cap(buf) bytes.
+	if len(p) >= cap(sw.buf) {
+		sw.buf = sw.buf[:cap(sw.buf)]
+		p = p[len(p)-cap(sw.buf):]
+		copy(sw.buf, p)
+		return
+	}
+
+	left := cap(sw.buf) - len(sw.buf)
+	if left < len(p) {
+		// We need to shift spaceNeeded bytes from the end to make room for p at the end.
+		spaceNeeded := len(p) - left
+		copy(sw.buf, sw.buf[spaceNeeded:])
+		sw.buf = sw.buf[:len(sw.buf)-spaceNeeded]
+	}
+
+	sw.buf = append(sw.buf, p...)
+}
diff --git a/vendor/github.com/coder/websocket/conn.go b/vendor/github.com/coder/websocket/conn.go
new file mode 100644
index 0000000..8690fb3
--- /dev/null
+++ b/vendor/github.com/coder/websocket/conn.go
@@ -0,0 +1,295 @@
+//go:build !js
+// +build !js
+
+package websocket
+
+import (
+	"bufio"
+	"context"
+	"fmt"
+	"io"
+	"net"
+	"runtime"
+	"strconv"
+	"sync"
+	"sync/atomic"
+)
+
+// MessageType represents the type of a WebSocket message.
+// See https://tools.ietf.org/html/rfc6455#section-5.6
+type MessageType int
+
+// MessageType constants.
+const (
+	// MessageText is for UTF-8 encoded text messages like JSON.
+	MessageText MessageType = iota + 1
+	// MessageBinary is for binary messages like protobufs.
+	MessageBinary
+)
+
+// Conn represents a WebSocket connection.
+// All methods may be called concurrently except for Reader and Read.
+//
+// You must always read from the connection. Otherwise control
+// frames will not be handled. See Reader and CloseRead.
+//
+// Be sure to call Close on the connection when you
+// are finished with it to release associated resources.
+//
+// On any error from any method, the connection is closed
+// with an appropriate reason.
+//
+// This applies to context expirations as well unfortunately.
+// See https://github.com/nhooyr/websocket/issues/242#issuecomment-633182220
+type Conn struct {
+	noCopy noCopy
+
+	subprotocol    string
+	rwc            io.ReadWriteCloser
+	client         bool
+	copts          *compressionOptions
+	flateThreshold int
+	br             *bufio.Reader
+	bw             *bufio.Writer
+
+	readTimeout     chan context.Context
+	writeTimeout    chan context.Context
+	timeoutLoopDone chan struct{}
+
+	// Read state.
+	readMu         *mu
+	readHeaderBuf  [8]byte
+	readControlBuf [maxControlPayload]byte
+	msgReader      *msgReader
+
+	// Write state.
+	msgWriter      *msgWriter
+	writeFrameMu   *mu
+	writeBuf       []byte
+	writeHeaderBuf [8]byte
+	writeHeader    header
+
+	closeReadMu   sync.Mutex
+	closeReadCtx  context.Context
+	closeReadDone chan struct{}
+
+	closed  chan struct{}
+	closeMu sync.Mutex
+	closing bool
+
+	pingCounter   int32
+	activePingsMu sync.Mutex
+	activePings   map[string]chan<- struct{}
+}
+
+type connConfig struct {
+	subprotocol    string
+	rwc            io.ReadWriteCloser
+	client         bool
+	copts          *compressionOptions
+	flateThreshold int
+
+	br *bufio.Reader
+	bw *bufio.Writer
+}
+
+// newConn builds a *Conn from the handshake results, wires up its reader,
+// writer and locks, installs a GC finalizer as a safety net for abandoned
+// connections, and starts the timeout-watching goroutine.
+func newConn(cfg connConfig) *Conn {
+	c := &Conn{
+		subprotocol:    cfg.subprotocol,
+		rwc:            cfg.rwc,
+		client:         cfg.client,
+		copts:          cfg.copts,
+		flateThreshold: cfg.flateThreshold,
+
+		br: cfg.br,
+		bw: cfg.bw,
+
+		readTimeout:     make(chan context.Context),
+		writeTimeout:    make(chan context.Context),
+		timeoutLoopDone: make(chan struct{}),
+
+		closed:      make(chan struct{}),
+		activePings: make(map[string]chan<- struct{}),
+	}
+
+	c.readMu = newMu(c)
+	c.writeFrameMu = newMu(c)
+
+	c.msgReader = newMsgReader(c)
+
+	c.msgWriter = newMsgWriter(c)
+	// Clients must mask frames, so reuse the bufio.Writer's buffer for that.
+	if c.client {
+		c.writeBuf = extractBufioWriterBuf(c.bw, c.rwc)
+	}
+
+	// Default compression thresholds: 128 bytes with context takeover,
+	// 512 bytes without (matching the documented AcceptOptions defaults).
+	if c.flate() && c.flateThreshold == 0 {
+		c.flateThreshold = 128
+		if !c.msgWriter.flateContextTakeover() {
+			c.flateThreshold = 512
+		}
+	}
+
+	// Safety net: close the raw connection if the Conn is garbage collected
+	// without Close/CloseNow having been called.
+	runtime.SetFinalizer(c, func(c *Conn) {
+		c.close()
+	})
+
+	go c.timeoutLoop()
+
+	return c
+}
+
+// Subprotocol returns the negotiated subprotocol.
+// An empty string means the default protocol.
+func (c *Conn) Subprotocol() string {
+	return c.subprotocol
+}
+
+// close tears down the connection exactly once: it clears the finalizer,
+// signals c.closed, closes the raw connection and then releases the message
+// reader/writer resources. Subsequent calls return net.ErrClosed.
+func (c *Conn) close() error {
+	c.closeMu.Lock()
+	defer c.closeMu.Unlock()
+
+	if c.isClosed() {
+		return net.ErrClosed
+	}
+	runtime.SetFinalizer(c, nil)
+	close(c.closed)
+
+	// Have to close after c.closed is closed to ensure any goroutine that wakes up
+	// from the connection being closed also sees that c.closed is closed and returns
+	// closeErr.
+	err := c.rwc.Close()
+	// With the close of rwc, these become safe to close.
+	c.msgWriter.close()
+	c.msgReader.close()
+	return err
+}
+
+// timeoutLoop watches the most recently supplied read and write contexts and
+// hard-closes the connection when either expires. New contexts arriving on
+// the timeout channels replace the ones being watched.
+func (c *Conn) timeoutLoop() {
+	defer close(c.timeoutLoopDone)
+
+	readCtx := context.Background()
+	writeCtx := context.Background()
+
+	for {
+		select {
+		case <-c.closed:
+			return
+
+		case writeCtx = <-c.writeTimeout:
+		case readCtx = <-c.readTimeout:
+
+		case <-readCtx.Done():
+			c.close()
+			return
+		case <-writeCtx.Done():
+			c.close()
+			return
+		}
+	}
+}
+
+// flate reports whether permessage-deflate was negotiated.
+func (c *Conn) flate() bool {
+	return c.copts != nil
+}
+
+// Ping sends a ping to the peer and waits for a pong.
+// Use this to measure latency or ensure the peer is responsive.
+// Ping must be called concurrently with Reader as it does
+// not read from the connection but instead waits for a Reader call
+// to read the pong.
+//
+// TCP Keepalives should suffice for most use cases.
+func (c *Conn) Ping(ctx context.Context) error {
+	p := atomic.AddInt32(&c.pingCounter, 1)
+
+	err := c.ping(ctx, strconv.Itoa(int(p)))
+	if err != nil {
+		return fmt.Errorf("failed to ping: %w", err)
+	}
+	return nil
+}
+
+// ping registers a pong channel keyed by the ping payload p, writes the ping
+// control frame and then waits for the matching pong, connection close or
+// context expiry. The registration is removed on all paths.
+func (c *Conn) ping(ctx context.Context, p string) error {
+	pong := make(chan struct{}, 1)
+
+	c.activePingsMu.Lock()
+	c.activePings[p] = pong
+	c.activePingsMu.Unlock()
+
+	defer func() {
+		c.activePingsMu.Lock()
+		delete(c.activePings, p)
+		c.activePingsMu.Unlock()
+	}()
+
+	err := c.writeControl(ctx, opPing, []byte(p))
+	if err != nil {
+		return err
+	}
+
+	select {
+	case <-c.closed:
+		return net.ErrClosed
+	case <-ctx.Done():
+		return fmt.Errorf("failed to wait for pong: %w", ctx.Err())
+	case <-pong:
+		return nil
+	}
+}
+
+// mu is a channel-based mutex tied to a Conn: acquiring it can be aborted by
+// context cancellation or by the connection closing, unlike sync.Mutex.
+type mu struct {
+	c  *Conn
+	ch chan struct{}
+}
+
+// newMu returns an unlocked mu bound to c.
+func newMu(c *Conn) *mu {
+	return &mu{
+		c:  c,
+		ch: make(chan struct{}, 1),
+	}
+}
+
+// forceLock acquires the lock unconditionally, blocking with no escape hatch.
+func (m *mu) forceLock() {
+	m.ch <- struct{}{}
+}
+
+// tryLock acquires the lock only if it is immediately available.
+func (m *mu) tryLock() bool {
+	select {
+	case m.ch <- struct{}{}:
+		return true
+	default:
+		return false
+	}
+}
+
+// lock acquires the lock, failing with net.ErrClosed if the connection closes
+// or with the context's error if ctx expires first.
+func (m *mu) lock(ctx context.Context) error {
+	select {
+	case <-m.c.closed:
+		return net.ErrClosed
+	case <-ctx.Done():
+		return fmt.Errorf("failed to acquire lock: %w", ctx.Err())
+	case m.ch <- struct{}{}:
+		// To make sure the connection is certainly alive.
+		// As it's possible the send on m.ch was selected
+		// over the receive on closed.
+		select {
+		case <-m.c.closed:
+			// Make sure to release.
+			m.unlock()
+			return net.ErrClosed
+		default:
+		}
+		return nil
+	}
+}
+
+// unlock releases the lock; releasing an unheld lock is a harmless no-op.
+func (m *mu) unlock() {
+	select {
+	case <-m.ch:
+	default:
+	}
+}
+
+// noCopy is embedded in Conn so `go vet`'s copylocks check flags any copy of
+// the connection value.
+type noCopy struct{}
+
+func (*noCopy) Lock() {}
diff --git a/vendor/github.com/coder/websocket/dial.go b/vendor/github.com/coder/websocket/dial.go
new file mode 100644
index 0000000..ad61a35
--- /dev/null
+++ b/vendor/github.com/coder/websocket/dial.go
@@ -0,0 +1,330 @@
+//go:build !js
+// +build !js
+
+package websocket
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"crypto/rand"
+	"encoding/base64"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/coder/websocket/internal/errd"
+)
+
+// DialOptions represents Dial's options.
+type DialOptions struct {
+	// HTTPClient is used for the connection.
+	// Its Transport must return writable bodies for WebSocket handshakes.
+	// http.Transport does beginning with Go 1.12.
+	HTTPClient *http.Client
+
+	// HTTPHeader specifies the HTTP headers included in the handshake request.
+	HTTPHeader http.Header
+
+	// Host optionally overrides the Host HTTP header to send. If empty, the value
+	// of URL.Host will be used.
+	Host string
+
+	// Subprotocols lists the WebSocket subprotocols to negotiate with the server.
+	Subprotocols []string
+
+	// CompressionMode controls the compression mode.
+	// Defaults to CompressionDisabled.
+	//
+	// See docs on CompressionMode for details.
+	CompressionMode CompressionMode
+
+	// CompressionThreshold controls the minimum size of a message before compression is applied.
+	//
+	// Defaults to 512 bytes for CompressionNoContextTakeover and 128 bytes
+	// for CompressionContextTakeover.
+	CompressionThreshold int
+}
+
+func (opts *DialOptions) cloneWithDefaults(ctx context.Context) (context.Context, context.CancelFunc, *DialOptions) {
+	var cancel context.CancelFunc
+
+	var o DialOptions
+	if opts != nil {
+		o = *opts
+	}
+	if o.HTTPClient == nil {
+		o.HTTPClient = http.DefaultClient
+	}
+	if o.HTTPClient.Timeout > 0 {
+		ctx, cancel = context.WithTimeout(ctx, o.HTTPClient.Timeout)
+
+		newClient := *o.HTTPClient
+		newClient.Timeout = 0
+		o.HTTPClient = &newClient
+	}
+	if o.HTTPHeader == nil {
+		o.HTTPHeader = http.Header{}
+	}
+	newClient := *o.HTTPClient
+	oldCheckRedirect := o.HTTPClient.CheckRedirect
+	newClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
+		switch req.URL.Scheme {
+		case "ws":
+			req.URL.Scheme = "http"
+		case "wss":
+			req.URL.Scheme = "https"
+		}
+		if oldCheckRedirect != nil {
+			return oldCheckRedirect(req, via)
+		}
+		return nil
+	}
+	o.HTTPClient = &newClient
+
+	return ctx, cancel, &o
+}
+
+// Dial performs a WebSocket handshake on url.
+//
+// The response is the WebSocket handshake response from the server.
+// You never need to close resp.Body yourself.
+//
+// If an error occurs, the returned response may be non nil.
+// However, you can only read the first 1024 bytes of the body.
+//
+// This function requires at least Go 1.12 as it uses a new feature
+// in net/http to perform WebSocket handshakes.
+// See docs on the HTTPClient option and https://github.com/golang/go/issues/26937#issuecomment-415855861
+//
+// URLs with http/https schemes will work and are interpreted as ws/wss.
+func Dial(ctx context.Context, u string, opts *DialOptions) (*Conn, *http.Response, error) {
+	return dial(ctx, u, opts, nil)
+}
+
+func dial(ctx context.Context, urls string, opts *DialOptions, rand io.Reader) (_ *Conn, _ *http.Response, err error) {
+	defer errd.Wrap(&err, "failed to WebSocket dial")
+
+	var cancel context.CancelFunc
+	ctx, cancel, opts = opts.cloneWithDefaults(ctx)
+	if cancel != nil {
+		defer cancel()
+	}
+
+	secWebSocketKey, err := secWebSocketKey(rand)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to generate Sec-WebSocket-Key: %w", err)
+	}
+
+	var copts *compressionOptions
+	if opts.CompressionMode != CompressionDisabled {
+		copts = opts.CompressionMode.opts()
+	}
+
+	resp, err := handshakeRequest(ctx, urls, opts, copts, secWebSocketKey)
+	if err != nil {
+		return nil, resp, err
+	}
+	respBody := resp.Body
+	resp.Body = nil
+	defer func() {
+		if err != nil {
+			// We read a bit of the body for easier debugging.
+			r := io.LimitReader(respBody, 1024)
+
+			timer := time.AfterFunc(time.Second*3, func() {
+				respBody.Close()
+			})
+			defer timer.Stop()
+
+			b, _ := io.ReadAll(r)
+			respBody.Close()
+			resp.Body = io.NopCloser(bytes.NewReader(b))
+		}
+	}()
+
+	copts, err = verifyServerResponse(opts, copts, secWebSocketKey, resp)
+	if err != nil {
+		return nil, resp, err
+	}
+
+	rwc, ok := respBody.(io.ReadWriteCloser)
+	if !ok {
+		return nil, resp, fmt.Errorf("response body is not a io.ReadWriteCloser: %T", respBody)
+	}
+
+	return newConn(connConfig{
+		subprotocol:    resp.Header.Get("Sec-WebSocket-Protocol"),
+		rwc:            rwc,
+		client:         true,
+		copts:          copts,
+		flateThreshold: opts.CompressionThreshold,
+		br:             getBufioReader(rwc),
+		bw:             getBufioWriter(rwc),
+	}), resp, nil
+}
+
+func handshakeRequest(ctx context.Context, urls string, opts *DialOptions, copts *compressionOptions, secWebSocketKey string) (*http.Response, error) {
+	u, err := url.Parse(urls)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse url: %w", err)
+	}
+
+	switch u.Scheme {
+	case "ws":
+		u.Scheme = "http"
+	case "wss":
+		u.Scheme = "https"
+	case "http", "https":
+	default:
+		return nil, fmt.Errorf("unexpected url scheme: %q", u.Scheme)
+	}
+
+	req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create new http request: %w", err)
+	}
+	if len(opts.Host) > 0 {
+		req.Host = opts.Host
+	}
+	req.Header = opts.HTTPHeader.Clone()
+	req.Header.Set("Connection", "Upgrade")
+	req.Header.Set("Upgrade", "websocket")
+	req.Header.Set("Sec-WebSocket-Version", "13")
+	req.Header.Set("Sec-WebSocket-Key", secWebSocketKey)
+	if len(opts.Subprotocols) > 0 {
+		req.Header.Set("Sec-WebSocket-Protocol", strings.Join(opts.Subprotocols, ","))
+	}
+	if copts != nil {
+		req.Header.Set("Sec-WebSocket-Extensions", copts.String())
+	}
+
+	resp, err := opts.HTTPClient.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to send handshake request: %w", err)
+	}
+	return resp, nil
+}
+
+func secWebSocketKey(rr io.Reader) (string, error) {
+	if rr == nil {
+		rr = rand.Reader
+	}
+	b := make([]byte, 16)
+	_, err := io.ReadFull(rr, b)
+	if err != nil {
+		return "", fmt.Errorf("failed to read random data from rand.Reader: %w", err)
+	}
+	return base64.StdEncoding.EncodeToString(b), nil
+}
+
+func verifyServerResponse(opts *DialOptions, copts *compressionOptions, secWebSocketKey string, resp *http.Response) (*compressionOptions, error) {
+	if resp.StatusCode != http.StatusSwitchingProtocols {
+		return nil, fmt.Errorf("expected handshake response status code %v but got %v", http.StatusSwitchingProtocols, resp.StatusCode)
+	}
+
+	if !headerContainsTokenIgnoreCase(resp.Header, "Connection", "Upgrade") {
+		return nil, fmt.Errorf("WebSocket protocol violation: Connection header %q does not contain Upgrade", resp.Header.Get("Connection"))
+	}
+
+	if !headerContainsTokenIgnoreCase(resp.Header, "Upgrade", "WebSocket") {
+		return nil, fmt.Errorf("WebSocket protocol violation: Upgrade header %q does not contain websocket", resp.Header.Get("Upgrade"))
+	}
+
+	if resp.Header.Get("Sec-WebSocket-Accept") != secWebSocketAccept(secWebSocketKey) {
+		return nil, fmt.Errorf("WebSocket protocol violation: invalid Sec-WebSocket-Accept %q, key %q",
+			resp.Header.Get("Sec-WebSocket-Accept"),
+			secWebSocketKey,
+		)
+	}
+
+	err := verifySubprotocol(opts.Subprotocols, resp)
+	if err != nil {
+		return nil, err
+	}
+
+	return verifyServerExtensions(copts, resp.Header)
+}
+
+func verifySubprotocol(subprotos []string, resp *http.Response) error {
+	proto := resp.Header.Get("Sec-WebSocket-Protocol")
+	if proto == "" {
+		return nil
+	}
+
+	for _, sp2 := range subprotos {
+		if strings.EqualFold(sp2, proto) {
+			return nil
+		}
+	}
+
+	return fmt.Errorf("WebSocket protocol violation: unexpected Sec-WebSocket-Protocol from server: %q", proto)
+}
+
+func verifyServerExtensions(copts *compressionOptions, h http.Header) (*compressionOptions, error) {
+	exts := websocketExtensions(h)
+	if len(exts) == 0 {
+		return nil, nil
+	}
+
+	ext := exts[0]
+	if ext.name != "permessage-deflate" || len(exts) > 1 || copts == nil {
+		return nil, fmt.Errorf("WebSocket protcol violation: unsupported extensions from server: %+v", exts[1:])
+	}
+
+	_copts := *copts
+	copts = &_copts
+
+	for _, p := range ext.params {
+		switch p {
+		case "client_no_context_takeover":
+			copts.clientNoContextTakeover = true
+			continue
+		case "server_no_context_takeover":
+			copts.serverNoContextTakeover = true
+			continue
+		}
+		if strings.HasPrefix(p, "server_max_window_bits=") {
+			// We can't adjust the deflate window, but decoding with a larger window is acceptable.
+			continue
+		}
+
+		return nil, fmt.Errorf("unsupported permessage-deflate parameter: %q", p)
+	}
+
+	return copts, nil
+}
+
+var bufioReaderPool sync.Pool
+
+func getBufioReader(r io.Reader) *bufio.Reader {
+	br, ok := bufioReaderPool.Get().(*bufio.Reader)
+	if !ok {
+		return bufio.NewReader(r)
+	}
+	br.Reset(r)
+	return br
+}
+
+func putBufioReader(br *bufio.Reader) {
+	bufioReaderPool.Put(br)
+}
+
+var bufioWriterPool sync.Pool
+
+func getBufioWriter(w io.Writer) *bufio.Writer {
+	bw, ok := bufioWriterPool.Get().(*bufio.Writer)
+	if !ok {
+		return bufio.NewWriter(w)
+	}
+	bw.Reset(w)
+	return bw
+}
+
+func putBufioWriter(bw *bufio.Writer) {
+	bufioWriterPool.Put(bw)
+}
diff --git a/vendor/github.com/coder/websocket/doc.go b/vendor/github.com/coder/websocket/doc.go
new file mode 100644
index 0000000..03edf12
--- /dev/null
+++ b/vendor/github.com/coder/websocket/doc.go
@@ -0,0 +1,34 @@
+//go:build !js
+// +build !js
+
+// Package websocket implements the RFC 6455 WebSocket protocol.
+//
+// https://tools.ietf.org/html/rfc6455
+//
+// Use Dial to dial a WebSocket server.
+//
+// Use Accept to accept a WebSocket client.
+//
+// Conn represents the resulting WebSocket connection.
+//
+// The examples are the best way to understand how to correctly use the library.
+//
+// The wsjson subpackage contain helpers for JSON and protobuf messages.
+//
+// More documentation at https://github.com/coder/websocket.
+//
+// # Wasm
+//
+// The client side supports compiling to Wasm.
+// It wraps the WebSocket browser API.
+//
+// See https://developer.mozilla.org/en-US/docs/Web/API/WebSocket
+//
+// Some important caveats to be aware of:
+//
+//   - Accept always errors out
+//   - Conn.Ping is no-op
+//   - Conn.CloseNow is Close(StatusGoingAway, "")
+//   - HTTPClient, HTTPHeader and CompressionMode in DialOptions are no-op
+//   - *http.Response from Dial is &http.Response{} with a 101 status code on success
+package websocket // import "github.com/coder/websocket"
diff --git a/vendor/github.com/coder/websocket/frame.go b/vendor/github.com/coder/websocket/frame.go
new file mode 100644
index 0000000..e7ab76b
--- /dev/null
+++ b/vendor/github.com/coder/websocket/frame.go
@@ -0,0 +1,173 @@
+//go:build !js
+
+package websocket
+
+import (
+	"bufio"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"math"
+
+	"github.com/coder/websocket/internal/errd"
+)
+
+// opcode represents a WebSocket opcode.
+type opcode int
+
+// https://tools.ietf.org/html/rfc6455#section-11.8.
+const (
+	opContinuation opcode = iota
+	opText
+	opBinary
+	// 3 - 7 are reserved for further non-control frames.
+	_
+	_
+	_
+	_
+	_
+	opClose
+	opPing
+	opPong
+	// 11-16 are reserved for further control frames.
+)
+
+// header represents a WebSocket frame header.
+// See https://tools.ietf.org/html/rfc6455#section-5.2.
+type header struct {
+	fin    bool
+	rsv1   bool
+	rsv2   bool
+	rsv3   bool
+	opcode opcode
+
+	payloadLength int64
+
+	masked  bool
+	maskKey uint32
+}
+
+// readFrameHeader reads a header from the reader.
+// See https://tools.ietf.org/html/rfc6455#section-5.2.
+func readFrameHeader(r *bufio.Reader, readBuf []byte) (h header, err error) {
+	defer errd.Wrap(&err, "failed to read frame header")
+
+	b, err := r.ReadByte()
+	if err != nil {
+		return header{}, err
+	}
+
+	h.fin = b&(1<<7) != 0
+	h.rsv1 = b&(1<<6) != 0
+	h.rsv2 = b&(1<<5) != 0
+	h.rsv3 = b&(1<<4) != 0
+
+	h.opcode = opcode(b & 0xf)
+
+	b, err = r.ReadByte()
+	if err != nil {
+		return header{}, err
+	}
+
+	h.masked = b&(1<<7) != 0
+
+	payloadLength := b &^ (1 << 7)
+	switch {
+	case payloadLength < 126:
+		h.payloadLength = int64(payloadLength)
+	case payloadLength == 126:
+		_, err = io.ReadFull(r, readBuf[:2])
+		h.payloadLength = int64(binary.BigEndian.Uint16(readBuf))
+	case payloadLength == 127:
+		_, err = io.ReadFull(r, readBuf)
+		h.payloadLength = int64(binary.BigEndian.Uint64(readBuf))
+	}
+	if err != nil {
+		return header{}, err
+	}
+
+	if h.payloadLength < 0 {
+		return header{}, fmt.Errorf("received negative payload length: %v", h.payloadLength)
+	}
+
+	if h.masked {
+		_, err = io.ReadFull(r, readBuf[:4])
+		if err != nil {
+			return header{}, err
+		}
+		h.maskKey = binary.LittleEndian.Uint32(readBuf)
+	}
+
+	return h, nil
+}
+
+// maxControlPayload is the maximum length of a control frame payload.
+// See https://tools.ietf.org/html/rfc6455#section-5.5.
+const maxControlPayload = 125
+
+// writeFrameHeader writes the bytes of the header to w.
+// See https://tools.ietf.org/html/rfc6455#section-5.2
+func writeFrameHeader(h header, w *bufio.Writer, buf []byte) (err error) {
+	defer errd.Wrap(&err, "failed to write frame header")
+
+	var b byte
+	if h.fin {
+		b |= 1 << 7
+	}
+	if h.rsv1 {
+		b |= 1 << 6
+	}
+	if h.rsv2 {
+		b |= 1 << 5
+	}
+	if h.rsv3 {
+		b |= 1 << 4
+	}
+
+	b |= byte(h.opcode)
+
+	err = w.WriteByte(b)
+	if err != nil {
+		return err
+	}
+
+	lengthByte := byte(0)
+	if h.masked {
+		lengthByte |= 1 << 7
+	}
+
+	switch {
+	case h.payloadLength > math.MaxUint16:
+		lengthByte |= 127
+	case h.payloadLength > 125:
+		lengthByte |= 126
+	case h.payloadLength >= 0:
+		lengthByte |= byte(h.payloadLength)
+	}
+	err = w.WriteByte(lengthByte)
+	if err != nil {
+		return err
+	}
+
+	switch {
+	case h.payloadLength > math.MaxUint16:
+		binary.BigEndian.PutUint64(buf, uint64(h.payloadLength))
+		_, err = w.Write(buf)
+	case h.payloadLength > 125:
+		binary.BigEndian.PutUint16(buf, uint16(h.payloadLength))
+		_, err = w.Write(buf[:2])
+	}
+	if err != nil {
+		return err
+	}
+
+	if h.masked {
+		binary.LittleEndian.PutUint32(buf, h.maskKey)
+		_, err = w.Write(buf[:4])
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/coder/websocket/internal/bpool/bpool.go b/vendor/github.com/coder/websocket/internal/bpool/bpool.go
new file mode 100644
index 0000000..aa826fb
--- /dev/null
+++ b/vendor/github.com/coder/websocket/internal/bpool/bpool.go
@@ -0,0 +1,24 @@
+package bpool
+
+import (
+	"bytes"
+	"sync"
+)
+
+var bpool sync.Pool
+
+// Get returns a buffer from the pool or creates a new one if
+// the pool is empty.
+func Get() *bytes.Buffer {
+	b := bpool.Get()
+	if b == nil {
+		return &bytes.Buffer{}
+	}
+	return b.(*bytes.Buffer)
+}
+
+// Put returns a buffer into the pool.
+func Put(b *bytes.Buffer) {
+	b.Reset()
+	bpool.Put(b)
+}
diff --git a/vendor/github.com/coder/websocket/internal/errd/wrap.go b/vendor/github.com/coder/websocket/internal/errd/wrap.go
new file mode 100644
index 0000000..6e77913
--- /dev/null
+++ b/vendor/github.com/coder/websocket/internal/errd/wrap.go
@@ -0,0 +1,14 @@
+package errd
+
+import (
+	"fmt"
+)
+
+// Wrap wraps err with fmt.Errorf if err is non nil.
+// Intended for use with defer and a named error return.
+// Inspired by https://github.com/golang/go/issues/32676.
+func Wrap(err *error, f string, v ...interface{}) {
+	if *err != nil {
+		*err = fmt.Errorf(f+": %w", append(v, *err)...)
+	}
+}
diff --git a/vendor/github.com/coder/websocket/internal/util/util.go b/vendor/github.com/coder/websocket/internal/util/util.go
new file mode 100644
index 0000000..aa21070
--- /dev/null
+++ b/vendor/github.com/coder/websocket/internal/util/util.go
@@ -0,0 +1,15 @@
+package util
+
+// WriterFunc is used to implement one off io.Writers.
+type WriterFunc func(p []byte) (int, error)
+
+func (f WriterFunc) Write(p []byte) (int, error) {
+	return f(p)
+}
+
+// ReaderFunc is used to implement one off io.Readers.
+type ReaderFunc func(p []byte) (int, error)
+
+func (f ReaderFunc) Read(p []byte) (int, error) {
+	return f(p)
+}
diff --git a/vendor/github.com/coder/websocket/internal/wsjs/wsjs_js.go b/vendor/github.com/coder/websocket/internal/wsjs/wsjs_js.go
new file mode 100644
index 0000000..11eb59c
--- /dev/null
+++ b/vendor/github.com/coder/websocket/internal/wsjs/wsjs_js.go
@@ -0,0 +1,169 @@
+//go:build js
+// +build js
+
+// Package wsjs implements typed access to the browser javascript WebSocket API.
+//
+// https://developer.mozilla.org/en-US/docs/Web/API/WebSocket
+package wsjs
+
+import (
+	"syscall/js"
+)
+
+func handleJSError(err *error, onErr func()) {
+	r := recover()
+
+	if jsErr, ok := r.(js.Error); ok {
+		*err = jsErr
+
+		if onErr != nil {
+			onErr()
+		}
+		return
+	}
+
+	if r != nil {
+		panic(r)
+	}
+}
+
+// New is a wrapper around the javascript WebSocket constructor.
+func New(url string, protocols []string) (c WebSocket, err error) {
+	defer handleJSError(&err, func() {
+		c = WebSocket{}
+	})
+
+	jsProtocols := make([]interface{}, len(protocols))
+	for i, p := range protocols {
+		jsProtocols[i] = p
+	}
+
+	c = WebSocket{
+		v: js.Global().Get("WebSocket").New(url, jsProtocols),
+	}
+
+	c.setBinaryType("arraybuffer")
+
+	return c, nil
+}
+
+// WebSocket is a wrapper around a javascript WebSocket object.
+type WebSocket struct {
+	v js.Value
+}
+
+func (c WebSocket) setBinaryType(typ string) {
+	c.v.Set("binaryType", string(typ))
+}
+
+func (c WebSocket) addEventListener(eventType string, fn func(e js.Value)) func() {
+	f := js.FuncOf(func(this js.Value, args []js.Value) interface{} {
+		fn(args[0])
+		return nil
+	})
+	c.v.Call("addEventListener", eventType, f)
+
+	return func() {
+		c.v.Call("removeEventListener", eventType, f)
+		f.Release()
+	}
+}
+
+// CloseEvent is the type passed to a WebSocket close handler.
+type CloseEvent struct {
+	Code     uint16
+	Reason   string
+	WasClean bool
+}
+
+// OnClose registers a function to be called when the WebSocket is closed.
+func (c WebSocket) OnClose(fn func(CloseEvent)) (remove func()) {
+	return c.addEventListener("close", func(e js.Value) {
+		ce := CloseEvent{
+			Code:     uint16(e.Get("code").Int()),
+			Reason:   e.Get("reason").String(),
+			WasClean: e.Get("wasClean").Bool(),
+		}
+		fn(ce)
+	})
+}
+
+// OnError registers a function to be called when there is an error
+// with the WebSocket.
+func (c WebSocket) OnError(fn func(e js.Value)) (remove func()) {
+	return c.addEventListener("error", fn)
+}
+
+// MessageEvent is the type passed to a message handler.
+type MessageEvent struct {
+	// string or []byte.
+	Data interface{}
+
+	// There are more fields to the interface but we don't use them.
+	// See https://developer.mozilla.org/en-US/docs/Web/API/MessageEvent
+}
+
+// OnMessage registers a function to be called when the WebSocket receives a message.
+func (c WebSocket) OnMessage(fn func(m MessageEvent)) (remove func()) {
+	return c.addEventListener("message", func(e js.Value) {
+		var data interface{}
+
+		arrayBuffer := e.Get("data")
+		if arrayBuffer.Type() == js.TypeString {
+			data = arrayBuffer.String()
+		} else {
+			data = extractArrayBuffer(arrayBuffer)
+		}
+
+		me := MessageEvent{
+			Data: data,
+		}
+		fn(me)
+	})
+}
+
+// Subprotocol returns the WebSocket subprotocol in use.
+func (c WebSocket) Subprotocol() string {
+	return c.v.Get("protocol").String()
+}
+
+// OnOpen registers a function to be called when the WebSocket is opened.
+func (c WebSocket) OnOpen(fn func(e js.Value)) (remove func()) {
+	return c.addEventListener("open", fn)
+}
+
+// Close closes the WebSocket with the given code and reason.
+func (c WebSocket) Close(code int, reason string) (err error) {
+	defer handleJSError(&err, nil)
+	c.v.Call("close", code, reason)
+	return err
+}
+
+// SendText sends the given string as a text message
+// on the WebSocket.
+func (c WebSocket) SendText(v string) (err error) {
+	defer handleJSError(&err, nil)
+	c.v.Call("send", v)
+	return err
+}
+
+// SendBytes sends the given message as a binary message
+// on the WebSocket.
+func (c WebSocket) SendBytes(v []byte) (err error) {
+	defer handleJSError(&err, nil)
+	c.v.Call("send", uint8Array(v))
+	return err
+}
+
+func extractArrayBuffer(arrayBuffer js.Value) []byte {
+	uint8Array := js.Global().Get("Uint8Array").New(arrayBuffer)
+	dst := make([]byte, uint8Array.Length())
+	js.CopyBytesToGo(dst, uint8Array)
+	return dst
+}
+
+func uint8Array(src []byte) js.Value {
+	uint8Array := js.Global().Get("Uint8Array").New(len(src))
+	js.CopyBytesToJS(uint8Array, src)
+	return uint8Array
+}
diff --git a/vendor/github.com/coder/websocket/internal/xsync/go.go b/vendor/github.com/coder/websocket/internal/xsync/go.go
new file mode 100644
index 0000000..5229b12
--- /dev/null
+++ b/vendor/github.com/coder/websocket/internal/xsync/go.go
@@ -0,0 +1,26 @@
+package xsync
+
+import (
+	"fmt"
+	"runtime/debug"
+)
+
+// Go allows running a function in another goroutine
+// and waiting for its error.
+func Go(fn func() error) <-chan error {
+	errs := make(chan error, 1)
+	go func() {
+		defer func() {
+			r := recover()
+			if r != nil {
+				select {
+				case errs <- fmt.Errorf("panic in go fn: %v, %s", r, debug.Stack()):
+				default:
+				}
+			}
+		}()
+		errs <- fn()
+	}()
+
+	return errs
+}
diff --git a/vendor/github.com/coder/websocket/internal/xsync/int64.go b/vendor/github.com/coder/websocket/internal/xsync/int64.go
new file mode 100644
index 0000000..a0c4020
--- /dev/null
+++ b/vendor/github.com/coder/websocket/internal/xsync/int64.go
@@ -0,0 +1,23 @@
+package xsync
+
+import (
+	"sync/atomic"
+)
+
+// Int64 represents an atomic int64.
+type Int64 struct {
+	// We do not use atomic.Load/StoreInt64 since it does not
+	// work on 32 bit computers but we need 64 bit integers.
+	i atomic.Value
+}
+
+// Load loads the int64.
+func (v *Int64) Load() int64 {
+	i, _ := v.i.Load().(int64)
+	return i
+}
+
+// Store stores the int64.
+func (v *Int64) Store(i int64) {
+	v.i.Store(i)
+}
diff --git a/vendor/github.com/coder/websocket/make.sh b/vendor/github.com/coder/websocket/make.sh
new file mode 100644
index 0000000..170d00a
--- /dev/null
+++ b/vendor/github.com/coder/websocket/make.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+set -eu
+cd -- "$(dirname "$0")"
+
+echo "=== fmt.sh"
+./ci/fmt.sh
+echo "=== lint.sh"
+./ci/lint.sh
+echo "=== test.sh"
+./ci/test.sh "$@"
+echo "=== bench.sh"
+./ci/bench.sh
diff --git a/vendor/github.com/coder/websocket/mask.go b/vendor/github.com/coder/websocket/mask.go
new file mode 100644
index 0000000..7bc0c8d
--- /dev/null
+++ b/vendor/github.com/coder/websocket/mask.go
@@ -0,0 +1,128 @@
+package websocket
+
+import (
+	"encoding/binary"
+	"math/bits"
+)
+
+// maskGo applies the WebSocket masking algorithm to p
+// with the given key.
+// See https://tools.ietf.org/html/rfc6455#section-5.3
+//
+// The returned value is the correctly rotated key to
+// to continue to mask/unmask the message.
+//
+// It is optimized for LittleEndian and expects the key
+// to be in little endian.
+//
+// See https://github.com/golang/go/issues/31586
+func maskGo(b []byte, key uint32) uint32 {
+	if len(b) >= 8 {
+		key64 := uint64(key)<<32 | uint64(key)
+
+		// At some point in the future we can clean these unrolled loops up.
+		// See https://github.com/golang/go/issues/31586#issuecomment-487436401
+
+		// Then we xor until b is less than 128 bytes.
+		for len(b) >= 128 {
+			v := binary.LittleEndian.Uint64(b)
+			binary.LittleEndian.PutUint64(b, v^key64)
+			v = binary.LittleEndian.Uint64(b[8:16])
+			binary.LittleEndian.PutUint64(b[8:16], v^key64)
+			v = binary.LittleEndian.Uint64(b[16:24])
+			binary.LittleEndian.PutUint64(b[16:24], v^key64)
+			v = binary.LittleEndian.Uint64(b[24:32])
+			binary.LittleEndian.PutUint64(b[24:32], v^key64)
+			v = binary.LittleEndian.Uint64(b[32:40])
+			binary.LittleEndian.PutUint64(b[32:40], v^key64)
+			v = binary.LittleEndian.Uint64(b[40:48])
+			binary.LittleEndian.PutUint64(b[40:48], v^key64)
+			v = binary.LittleEndian.Uint64(b[48:56])
+			binary.LittleEndian.PutUint64(b[48:56], v^key64)
+			v = binary.LittleEndian.Uint64(b[56:64])
+			binary.LittleEndian.PutUint64(b[56:64], v^key64)
+			v = binary.LittleEndian.Uint64(b[64:72])
+			binary.LittleEndian.PutUint64(b[64:72], v^key64)
+			v = binary.LittleEndian.Uint64(b[72:80])
+			binary.LittleEndian.PutUint64(b[72:80], v^key64)
+			v = binary.LittleEndian.Uint64(b[80:88])
+			binary.LittleEndian.PutUint64(b[80:88], v^key64)
+			v = binary.LittleEndian.Uint64(b[88:96])
+			binary.LittleEndian.PutUint64(b[88:96], v^key64)
+			v = binary.LittleEndian.Uint64(b[96:104])
+			binary.LittleEndian.PutUint64(b[96:104], v^key64)
+			v = binary.LittleEndian.Uint64(b[104:112])
+			binary.LittleEndian.PutUint64(b[104:112], v^key64)
+			v = binary.LittleEndian.Uint64(b[112:120])
+			binary.LittleEndian.PutUint64(b[112:120], v^key64)
+			v = binary.LittleEndian.Uint64(b[120:128])
+			binary.LittleEndian.PutUint64(b[120:128], v^key64)
+			b = b[128:]
+		}
+
+		// Then we xor until b is less than 64 bytes.
+		for len(b) >= 64 {
+			v := binary.LittleEndian.Uint64(b)
+			binary.LittleEndian.PutUint64(b, v^key64)
+			v = binary.LittleEndian.Uint64(b[8:16])
+			binary.LittleEndian.PutUint64(b[8:16], v^key64)
+			v = binary.LittleEndian.Uint64(b[16:24])
+			binary.LittleEndian.PutUint64(b[16:24], v^key64)
+			v = binary.LittleEndian.Uint64(b[24:32])
+			binary.LittleEndian.PutUint64(b[24:32], v^key64)
+			v = binary.LittleEndian.Uint64(b[32:40])
+			binary.LittleEndian.PutUint64(b[32:40], v^key64)
+			v = binary.LittleEndian.Uint64(b[40:48])
+			binary.LittleEndian.PutUint64(b[40:48], v^key64)
+			v = binary.LittleEndian.Uint64(b[48:56])
+			binary.LittleEndian.PutUint64(b[48:56], v^key64)
+			v = binary.LittleEndian.Uint64(b[56:64])
+			binary.LittleEndian.PutUint64(b[56:64], v^key64)
+			b = b[64:]
+		}
+
+		// Then we xor until b is less than 32 bytes.
+		for len(b) >= 32 {
+			v := binary.LittleEndian.Uint64(b)
+			binary.LittleEndian.PutUint64(b, v^key64)
+			v = binary.LittleEndian.Uint64(b[8:16])
+			binary.LittleEndian.PutUint64(b[8:16], v^key64)
+			v = binary.LittleEndian.Uint64(b[16:24])
+			binary.LittleEndian.PutUint64(b[16:24], v^key64)
+			v = binary.LittleEndian.Uint64(b[24:32])
+			binary.LittleEndian.PutUint64(b[24:32], v^key64)
+			b = b[32:]
+		}
+
+		// Then we xor until b is less than 16 bytes.
+		for len(b) >= 16 {
+			v := binary.LittleEndian.Uint64(b)
+			binary.LittleEndian.PutUint64(b, v^key64)
+			v = binary.LittleEndian.Uint64(b[8:16])
+			binary.LittleEndian.PutUint64(b[8:16], v^key64)
+			b = b[16:]
+		}
+
+		// Then we xor until b is less than 8 bytes.
+		for len(b) >= 8 {
+			v := binary.LittleEndian.Uint64(b)
+			binary.LittleEndian.PutUint64(b, v^key64)
+			b = b[8:]
+		}
+	}
+
+	// Then we xor until b is less than 4 bytes.
+	for len(b) >= 4 {
+		v := binary.LittleEndian.Uint32(b)
+		binary.LittleEndian.PutUint32(b, v^key)
+		b = b[4:]
+	}
+
+	// xor remaining bytes.
+	for i := range b {
+		b[i] ^= byte(key)
+		key = bits.RotateLeft32(key, -8)
+	}
+
+	return key
+}
diff --git a/vendor/github.com/coder/websocket/mask_amd64.s b/vendor/github.com/coder/websocket/mask_amd64.s
new file mode 100644
index 0000000..bd42be3
--- /dev/null
+++ b/vendor/github.com/coder/websocket/mask_amd64.s
@@ -0,0 +1,127 @@
+#include "textflag.h"
+
+// func maskAsm(b *byte, len int, key uint32)
+TEXT ·maskAsm(SB), NOSPLIT, $0-28
+	// AX = b
+	// CX = len (left length)
+	// SI = key (uint32)
+	// DI = uint64(SI) | uint64(SI)<<32
+	MOVQ b+0(FP), AX
+	MOVQ len+8(FP), CX
+	MOVL key+16(FP), SI
+
+	// calculate the DI
+	// DI = SI<<32 | SI
+	MOVL SI, DI
+	MOVQ DI, DX
+	SHLQ $32, DI
+	ORQ  DX, DI
+
+	CMPQ  CX, $15
+	JLE   less_than_16
+	CMPQ  CX, $63
+	JLE   less_than_64
+	CMPQ  CX, $128
+	JLE   sse
+	TESTQ $31, AX
+	JNZ   unaligned
+
+unaligned_loop_1byte:
+	XORB  SI, (AX)
+	INCQ  AX
+	DECQ  CX
+	ROLL  $24, SI
+	TESTQ $7, AX
+	JNZ   unaligned_loop_1byte
+
+	// calculate DI again since SI was modified
+	// DI = SI<<32 | SI
+	MOVL SI, DI
+	MOVQ DI, DX
+	SHLQ $32, DI
+	ORQ  DX, DI
+
+	TESTQ $31, AX
+	JZ    sse
+
+unaligned:
+	TESTQ $7, AX               // AND $7 & len, if not zero jump to loop_1b.
+	JNZ   unaligned_loop_1byte
+
+unaligned_loop:
+	// we don't need to check the CX since we know it's above 128
+	XORQ  DI, (AX)
+	ADDQ  $8, AX
+	SUBQ  $8, CX
+	TESTQ $31, AX
+	JNZ   unaligned_loop
+	JMP   sse
+
+sse:
+	CMPQ       CX, $0x40
+	JL         less_than_64
+	MOVQ       DI, X0
+	PUNPCKLQDQ X0, X0
+
+sse_loop:
+	MOVOU 0*16(AX), X1
+	MOVOU 1*16(AX), X2
+	MOVOU 2*16(AX), X3
+	MOVOU 3*16(AX), X4
+	PXOR  X0, X1
+	PXOR  X0, X2
+	PXOR  X0, X3
+	PXOR  X0, X4
+	MOVOU X1, 0*16(AX)
+	MOVOU X2, 1*16(AX)
+	MOVOU X3, 2*16(AX)
+	MOVOU X4, 3*16(AX)
+	ADDQ  $0x40, AX
+	SUBQ  $0x40, CX
+	CMPQ  CX, $0x40
+	JAE   sse_loop
+
+less_than_64:
+	TESTQ $32, CX
+	JZ    less_than_32
+	XORQ  DI, (AX)
+	XORQ  DI, 8(AX)
+	XORQ  DI, 16(AX)
+	XORQ  DI, 24(AX)
+	ADDQ  $32, AX
+
+less_than_32:
+	TESTQ $16, CX
+	JZ    less_than_16
+	XORQ  DI, (AX)
+	XORQ  DI, 8(AX)
+	ADDQ  $16, AX
+
+less_than_16:
+	TESTQ $8, CX
+	JZ    less_than_8
+	XORQ  DI, (AX)
+	ADDQ  $8, AX
+
+less_than_8:
+	TESTQ $4, CX
+	JZ    less_than_4
+	XORL  SI, (AX)
+	ADDQ  $4, AX
+
+less_than_4:
+	TESTQ $2, CX
+	JZ    less_than_2
+	XORW  SI, (AX)
+	ROLL  $16, SI
+	ADDQ  $2, AX
+
+less_than_2:
+	TESTQ $1, CX
+	JZ    done
+	XORB  SI, (AX)
+	ROLL  $24, SI
+
+done:
+	MOVL SI, ret+24(FP)
+	RET
diff --git a/vendor/github.com/coder/websocket/mask_arm64.s b/vendor/github.com/coder/websocket/mask_arm64.s
new file mode 100644
index 0000000..e494b43
--- /dev/null
+++ b/vendor/github.com/coder/websocket/mask_arm64.s
@@ -0,0 +1,72 @@
+#include "textflag.h"
+
+// func maskAsm(b *byte, len int, key uint32)
+TEXT ·maskAsm(SB), NOSPLIT, $0-28
+	// R0 = b
+	// R1 = len
+	// R3 = key (uint32)
+	// R2 = uint64(key)<<32 | uint64(key)
+	MOVD  b_ptr+0(FP), R0
+	MOVD  b_len+8(FP), R1
+	MOVWU key+16(FP), R3
+	MOVD  R3, R2
+	ORR   R2<<32, R2, R2
+	VDUP  R2, V0.D2
+	CMP   $64, R1
+	BLT   less_than_64
+
+loop_64:
+	VLD1   (R0), [V1.B16, V2.B16, V3.B16, V4.B16]
+	VEOR   V1.B16, V0.B16, V1.B16
+	VEOR   V2.B16, V0.B16, V2.B16
+	VEOR   V3.B16, V0.B16, V3.B16
+	VEOR   V4.B16, V0.B16, V4.B16
+	VST1.P [V1.B16, V2.B16, V3.B16, V4.B16], 64(R0)
+	SUBS   $64, R1
+	CMP    $64, R1
+	BGE    loop_64
+
+less_than_64:
+	CBZ    R1, end
+	TBZ    $5, R1, less_than_32
+	VLD1   (R0), [V1.B16, V2.B16]
+	VEOR   V1.B16, V0.B16, V1.B16
+	VEOR   V2.B16, V0.B16, V2.B16
+	VST1.P [V1.B16, V2.B16], 32(R0)
+
+less_than_32:
+	TBZ   $4, R1, less_than_16
+	LDP   (R0), (R11, R12)
+	EOR   R11, R2, R11
+	EOR   R12, R2, R12
+	STP.P (R11, R12), 16(R0)
+
+less_than_16:
+	TBZ    $3, R1, less_than_8
+	MOVD   (R0), R11
+	EOR    R2, R11, R11
+	MOVD.P R11, 8(R0)
+
+less_than_8:
+	TBZ     $2, R1, less_than_4
+	MOVWU   (R0), R11
+	EORW    R2, R11, R11
+	MOVWU.P R11, 4(R0)
+
+less_than_4:
+	TBZ     $1, R1, less_than_2
+	MOVHU   (R0), R11
+	EORW    R3, R11, R11
+	MOVHU.P R11, 2(R0)
+	RORW    $16, R3
+
+less_than_2:
+	TBZ     $0, R1, end
+	MOVBU   (R0), R11
+	EORW    R3, R11, R11
+	MOVBU.P R11, 1(R0)
+	RORW    $8, R3
+
+end:
+	MOVWU R3, ret+24(FP)
+	RET
diff --git a/vendor/github.com/coder/websocket/mask_asm.go b/vendor/github.com/coder/websocket/mask_asm.go
new file mode 100644
index 0000000..f9484b5
--- /dev/null
+++ b/vendor/github.com/coder/websocket/mask_asm.go
@@ -0,0 +1,26 @@
+//go:build amd64 || arm64
+
+package websocket
+
+func mask(b []byte, key uint32) uint32 {
+	// TODO: Will enable in v1.9.0.
+	return maskGo(b, key)
+	/*
+		if len(b) > 0 {
+			return maskAsm(&b[0], len(b), key)
+		}
+		return key
+	*/
+}
+
+// @nhooyr: I am not confident that the amd64 or the arm64 implementations of this
+// function are perfect. There are almost certainly missing optimizations or
+// opportunities for simplification. I'm confident there are no bugs though.
+// For example, the arm64 implementation doesn't align memory like the amd64.
+// Or the amd64 implementation could use AVX512 instead of just AVX2.
+// The AVX2 code I had to disable anyway as it wasn't performing as expected.
+// See https://github.com/nhooyr/websocket/pull/326#issuecomment-1771138049
+//
+//go:noescape
+//lint:ignore U1000 disabled till v1.9.0
+func maskAsm(b *byte, len int, key uint32) uint32
diff --git a/vendor/github.com/coder/websocket/mask_go.go b/vendor/github.com/coder/websocket/mask_go.go
new file mode 100644
index 0000000..b29435e
--- /dev/null
+++ b/vendor/github.com/coder/websocket/mask_go.go
@@ -0,0 +1,7 @@
+//go:build !amd64 && !arm64 && !js
+
+package websocket
+
+// mask applies the WebSocket masking key to b using the portable Go
+// implementation on platforms without an assembly fast path, returning
+// the key to use for the next chunk.
+func mask(b []byte, key uint32) uint32 {
+	return maskGo(b, key)
+}
diff --git a/vendor/github.com/coder/websocket/netconn.go b/vendor/github.com/coder/websocket/netconn.go
new file mode 100644
index 0000000..86f7dad
--- /dev/null
+++ b/vendor/github.com/coder/websocket/netconn.go
@@ -0,0 +1,237 @@
+package websocket
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"math"
+	"net"
+	"sync/atomic"
+	"time"
+)
+
+// NetConn converts a *websocket.Conn into a net.Conn.
+//
+// It's for tunneling arbitrary protocols over WebSockets.
+// Few users of the library will need this but it's tricky to implement
+// correctly and so provided in the library.
+// See https://github.com/nhooyr/websocket/issues/100.
+//
+// Every Write to the net.Conn will correspond to a message write of
+// the given type on *websocket.Conn.
+//
+// The passed ctx bounds the lifetime of the net.Conn. If cancelled,
+// all reads and writes on the net.Conn will be cancelled.
+//
+// If a message is read that is not of the correct type, the connection
+// will be closed with StatusUnsupportedData and an error will be returned.
+//
+// Close will close the *websocket.Conn with StatusNormalClosure.
+//
+// When a deadline is hit and there is an active read or write goroutine, the
+// connection will be closed. This is different from most net.Conn implementations
+// where only the reading/writing goroutines are interrupted but the connection
+// is kept alive.
+//
+// The Addr methods will return the real addresses for connections obtained
+// from websocket.Accept. But for connections obtained from websocket.Dial, a mock net.Addr
+// will be returned that gives "websocket" for Network() and "websocket/unknown-addr" for
+// String(). This is because websocket.Dial only exposes a io.ReadWriteCloser instead of the
+// full net.Conn to us.
+//
+// When running as WASM, the Addr methods will always return the mock address described above.
+//
+// A received StatusNormalClosure or StatusGoingAway close frame will be translated to
+// io.EOF when reading.
+//
+// Furthermore, the ReadLimit is set to -1 to disable it.
+func NetConn(ctx context.Context, c *Conn, msgType MessageType) net.Conn {
+	c.SetReadLimit(-1)
+
+	nc := &netConn{
+		c:       c,
+		msgType: msgType,
+		readMu:  newMu(c),
+		writeMu: newMu(c),
+	}
+
+	nc.writeCtx, nc.writeCancel = context.WithCancel(ctx)
+	nc.readCtx, nc.readCancel = context.WithCancel(ctx)
+
+	// Both timers are created with an effectively infinite duration and
+	// stopped immediately below, so they stay disarmed until one of the
+	// Set*Deadline methods arms them with Reset.
+	nc.writeTimer = time.AfterFunc(math.MaxInt64, func() {
+		if !nc.writeMu.tryLock() {
+			// If the lock cannot be acquired, then there is an
+			// active write goroutine and so we should cancel the context.
+			nc.writeCancel()
+			return
+		}
+		defer nc.writeMu.unlock()
+
+		// Prevents future writes from writing until the deadline is reset.
+		atomic.StoreInt64(&nc.writeExpired, 1)
+	})
+	// Drain the channel if the timer managed to fire so a later Reset
+	// behaves like a fresh timer.
+	if !nc.writeTimer.Stop() {
+		<-nc.writeTimer.C
+	}
+
+	nc.readTimer = time.AfterFunc(math.MaxInt64, func() {
+		if !nc.readMu.tryLock() {
+			// If the lock cannot be acquired, then there is an
+			// active read goroutine and so we should cancel the context.
+			nc.readCancel()
+			return
+		}
+		defer nc.readMu.unlock()
+
+		// Prevents future reads from reading until the deadline is reset.
+		atomic.StoreInt64(&nc.readExpired, 1)
+	})
+	// Same drain as for the write timer above.
+	if !nc.readTimer.Stop() {
+		<-nc.readTimer.C
+	}
+
+	return nc
+}
+
+// netConn adapts a *Conn to the net.Conn interface. See NetConn for the
+// message/deadline translation rules.
+type netConn struct {
+	// These must be first to be aligned on 32 bit platforms.
+	// https://github.com/nhooyr/websocket/pull/438
+	readExpired  int64
+	writeExpired int64
+
+	c       *Conn
+	msgType MessageType
+
+	writeTimer  *time.Timer
+	writeMu     *mu
+	writeCtx    context.Context
+	writeCancel context.CancelFunc
+
+	readTimer  *time.Timer
+	readMu     *mu
+	readCtx    context.Context
+	readCancel context.CancelFunc
+	readEOFed  bool
+	reader     io.Reader // reader for the in-progress message; nil between messages
+}
+
+// Compile-time check that netConn satisfies net.Conn.
+var _ net.Conn = &netConn{}
+
+// Close disarms both deadline timers, cancels any in-flight reads and
+// writes, and closes the WebSocket with StatusNormalClosure.
+func (nc *netConn) Close() error {
+	nc.writeTimer.Stop()
+	nc.writeCancel()
+	nc.readTimer.Stop()
+	nc.readCancel()
+	return nc.c.Close(StatusNormalClosure, "")
+}
+
+// Write sends p as a single WebSocket message of nc.msgType. It fails
+// immediately with context.DeadlineExceeded if the write deadline has
+// already expired.
+func (nc *netConn) Write(p []byte) (int, error) {
+	nc.writeMu.forceLock()
+	defer nc.writeMu.unlock()
+
+	if atomic.LoadInt64(&nc.writeExpired) == 1 {
+		return 0, fmt.Errorf("failed to write: %w", context.DeadlineExceeded)
+	}
+
+	err := nc.c.Write(nc.writeCtx, nc.msgType, p)
+	if err != nil {
+		return 0, err
+	}
+	return len(p), nil
+}
+
+// Read fills p from the message stream, looping past zero-byte reads so a
+// successful return always delivers at least one byte.
+func (nc *netConn) Read(p []byte) (int, error) {
+	nc.readMu.forceLock()
+	defer nc.readMu.unlock()
+
+	for {
+		n, err := nc.read(p)
+		if err != nil {
+			return n, err
+		}
+		if n == 0 {
+			continue
+		}
+		return n, nil
+	}
+}
+
+// read performs one read attempt: it lazily opens a message reader,
+// translates StatusNormalClosure/StatusGoingAway close frames into io.EOF,
+// and closes the connection if a message of the wrong type arrives.
+func (nc *netConn) read(p []byte) (int, error) {
+	if atomic.LoadInt64(&nc.readExpired) == 1 {
+		return 0, fmt.Errorf("failed to read: %w", context.DeadlineExceeded)
+	}
+
+	if nc.readEOFed {
+		return 0, io.EOF
+	}
+
+	if nc.reader == nil {
+		typ, r, err := nc.c.Reader(nc.readCtx)
+		if err != nil {
+			switch CloseStatus(err) {
+			case StatusNormalClosure, StatusGoingAway:
+				nc.readEOFed = true
+				return 0, io.EOF
+			}
+			return 0, err
+		}
+		if typ != nc.msgType {
+			err := fmt.Errorf("unexpected frame type read (expected %v): %v", nc.msgType, typ)
+			nc.c.Close(StatusUnsupportedData, err.Error())
+			return 0, err
+		}
+		nc.reader = r
+	}
+
+	n, err := nc.reader.Read(p)
+	if err == io.EOF {
+		// End of this message only: clear the reader so the next call
+		// starts a new message instead of surfacing EOF to the caller.
+		nc.reader = nil
+		err = nil
+	}
+	return n, err
+}
+
+// websocketAddr is the mock net.Addr returned when the underlying
+// transport does not expose a real address (see the NetConn docs).
+type websocketAddr struct {
+}
+
+func (a websocketAddr) Network() string {
+	return "websocket"
+}
+
+func (a websocketAddr) String() string {
+	return "websocket/unknown-addr"
+}
+
+// SetDeadline sets both the read and write deadlines. It always reports nil.
+func (nc *netConn) SetDeadline(t time.Time) error {
+	nc.SetWriteDeadline(t)
+	nc.SetReadDeadline(t)
+	return nil
+}
+
+// SetWriteDeadline arms (or, for a zero time, disarms) the write deadline
+// timer and clears any previously expired state.
+func (nc *netConn) SetWriteDeadline(t time.Time) error {
+	atomic.StoreInt64(&nc.writeExpired, 0)
+	if t.IsZero() {
+		nc.writeTimer.Stop()
+	} else {
+		dur := time.Until(t)
+		if dur <= 0 {
+			// Deadline already passed; make the timer fire immediately.
+			dur = 1
+		}
+		nc.writeTimer.Reset(dur)
+	}
+	return nil
+}
+
+// SetReadDeadline arms (or, for a zero time, disarms) the read deadline
+// timer and clears any previously expired state.
+func (nc *netConn) SetReadDeadline(t time.Time) error {
+	atomic.StoreInt64(&nc.readExpired, 0)
+	if t.IsZero() {
+		nc.readTimer.Stop()
+	} else {
+		dur := time.Until(t)
+		if dur <= 0 {
+			// Deadline already passed; make the timer fire immediately.
+			dur = 1
+		}
+		nc.readTimer.Reset(dur)
+	}
+	return nil
+}
diff --git a/vendor/github.com/coder/websocket/netconn_js.go b/vendor/github.com/coder/websocket/netconn_js.go
new file mode 100644
index 0000000..ccc8c89
--- /dev/null
+++ b/vendor/github.com/coder/websocket/netconn_js.go
@@ -0,0 +1,11 @@
+package websocket
+
+import "net"
+
+func (nc *netConn) RemoteAddr() net.Addr {
+	return websocketAddr{}
+}
+
+func (nc *netConn) LocalAddr() net.Addr {
+	return websocketAddr{}
+}
diff --git a/vendor/github.com/coder/websocket/netconn_notjs.go b/vendor/github.com/coder/websocket/netconn_notjs.go
new file mode 100644
index 0000000..f3eb0d6
--- /dev/null
+++ b/vendor/github.com/coder/websocket/netconn_notjs.go
@@ -0,0 +1,20 @@
+//go:build !js
+// +build !js
+
+package websocket
+
+import "net"
+
+func (nc *netConn) RemoteAddr() net.Addr {
+	if unc, ok := nc.c.rwc.(net.Conn); ok {
+		return unc.RemoteAddr()
+	}
+	return websocketAddr{}
+}
+
+func (nc *netConn) LocalAddr() net.Addr {
+	if unc, ok := nc.c.rwc.(net.Conn); ok {
+		return unc.LocalAddr()
+	}
+	return websocketAddr{}
+}
diff --git a/vendor/github.com/coder/websocket/read.go b/vendor/github.com/coder/websocket/read.go
new file mode 100644
index 0000000..1b9404b
--- /dev/null
+++ b/vendor/github.com/coder/websocket/read.go
@@ -0,0 +1,506 @@
+//go:build !js
+// +build !js
+
+package websocket
+
+import (
+	"bufio"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"time"
+
+	"github.com/coder/websocket/internal/errd"
+	"github.com/coder/websocket/internal/util"
+	"github.com/coder/websocket/internal/xsync"
+)
+
+// Reader reads from the connection until there is a WebSocket
+// data message to be read. It will handle ping, pong and close frames as appropriate.
+//
+// It returns the type of the message and an io.Reader to read it.
+// The passed context will also bound the reader.
+// Ensure you read to EOF otherwise the connection will hang.
+//
+// Call CloseRead if you do not expect any data messages from the peer.
+//
+// Only one Reader may be open at a time.
+//
+// If you need a separate timeout on the Reader call and the Read itself,
+// use time.AfterFunc to cancel the context passed in.
+// See https://github.com/nhooyr/websocket/issues/87#issue-451703332
+// Most users should not need this.
+func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) {
+	// Delegates to the unexported reader, which wraps errors uniformly.
+	return c.reader(ctx)
+}
+
+// Read is a convenience method around Reader to read a single message
+// from the connection.
+func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) {
+	typ, r, err := c.Reader(ctx)
+	if err != nil {
+		return 0, nil, err
+	}
+
+	// Read to EOF so the connection does not hang (see Reader docs).
+	b, err := io.ReadAll(r)
+	return typ, b, err
+}
+
+// CloseRead starts a goroutine to read from the connection until it is closed
+// or a data message is received.
+//
+// Once CloseRead is called you cannot read any messages from the connection.
+// The returned context will be cancelled when the connection is closed.
+//
+// If a data message is received, the connection will be closed with StatusPolicyViolation.
+//
+// Call CloseRead when you do not expect to read any more messages.
+// Since it actively reads from the connection, it will ensure that ping, pong and close
+// frames are responded to. This means c.Ping and c.Close will still work as expected.
+//
+// This function is idempotent.
+func (c *Conn) CloseRead(ctx context.Context) context.Context {
+	c.closeReadMu.Lock()
+	ctx2 := c.closeReadCtx
+	if ctx2 != nil {
+		// Already called once; return the existing context (idempotent).
+		c.closeReadMu.Unlock()
+		return ctx2
+	}
+	ctx, cancel := context.WithCancel(ctx)
+	c.closeReadCtx = ctx
+	c.closeReadDone = make(chan struct{})
+	c.closeReadMu.Unlock()
+
+	go func() {
+		defer close(c.closeReadDone)
+		defer cancel()
+		defer c.close()
+		_, _, err := c.Reader(ctx)
+		if err == nil {
+			// A data message arrived even though the caller promised none.
+			c.Close(StatusPolicyViolation, "unexpected data message")
+		}
+	}()
+	return ctx
+}
+
+// SetReadLimit sets the max number of bytes to read for a single message.
+// It applies to the Reader and Read methods.
+//
+// By default, the connection has a message read limit of 32768 bytes.
+//
+// When the limit is hit, the connection will be closed with StatusMessageTooBig.
+//
+// Set to -1 to disable.
+func (c *Conn) SetReadLimit(n int64) {
+	if n >= 0 {
+		// We read one more byte than the limit in case
+		// there is a fin frame that needs to be read.
+		n++
+	}
+
+	c.msgReader.limitReader.limit.Store(n)
+}
+
+// defaultReadLimit is the default maximum message size in bytes; see SetReadLimit.
+const defaultReadLimit = 32768
+
+// newMsgReader returns a msgReader for c with the read limit initialised
+// to defaultReadLimit (+1 to allow reading the final fin frame).
+func newMsgReader(c *Conn) *msgReader {
+	mr := &msgReader{
+		c:   c,
+		fin: true,
+	}
+	// Cache the read func once to avoid continuous allocations (see the
+	// readFunc field comment on msgReader).
+	mr.readFunc = mr.read
+
+	mr.limitReader = newLimitReader(c, mr.readFunc, defaultReadLimit+1)
+	return mr
+}
+
+// resetFlate prepares the flate reader state for a new compressed message,
+// reusing the sliding-window dictionary when context takeover is enabled.
+func (mr *msgReader) resetFlate() {
+	if mr.flateContextTakeover() {
+		if mr.dict == nil {
+			mr.dict = &slidingWindow{}
+		}
+		mr.dict.init(32768)
+	}
+	if mr.flateBufio == nil {
+		mr.flateBufio = getBufioReader(mr.readFunc)
+	}
+
+	if mr.flateContextTakeover() {
+		mr.flateReader = getFlateReader(mr.flateBufio, mr.dict.buf)
+	} else {
+		mr.flateReader = getFlateReader(mr.flateBufio, nil)
+	}
+	mr.limitReader.r = mr.flateReader
+	mr.flateTail.Reset(deflateMessageTail)
+}
+
+// putFlateReader returns the flate reader to its pool, if one is held.
+func (mr *msgReader) putFlateReader() {
+	if mr.flateReader != nil {
+		putFlateReader(mr.flateReader)
+		mr.flateReader = nil
+	}
+}
+
+// close releases all pooled resources held by the reader. It force-locks
+// readMu so no concurrent read can observe the freed state.
+func (mr *msgReader) close() {
+	mr.c.readMu.forceLock()
+	mr.putFlateReader()
+	if mr.dict != nil {
+		mr.dict.close()
+		mr.dict = nil
+	}
+	if mr.flateBufio != nil {
+		putBufioReader(mr.flateBufio)
+	}
+
+	if mr.c.client {
+		putBufioReader(mr.c.br)
+		mr.c.br = nil
+	}
+}
+
+// flateContextTakeover reports whether the negotiated compression options
+// retain the sliding window between messages on the reading side.
+func (mr *msgReader) flateContextTakeover() bool {
+	if mr.c.client {
+		return !mr.c.copts.serverNoContextTakeover
+	}
+	return !mr.c.copts.clientNoContextTakeover
+}
+
+// readRSV1Illegal reports whether a set rsv1 bit is illegal for header h.
+func (c *Conn) readRSV1Illegal(h header) bool {
+	// If compression is disabled, rsv1 is illegal.
+	if !c.flate() {
+		return true
+	}
+	// rsv1 is only allowed on data frames beginning messages.
+	if h.opcode != opText && h.opcode != opBinary {
+		return true
+	}
+	return false
+}
+
+// readLoop reads frame headers until a data frame arrives, transparently
+// handling control frames (close/ping/pong) and rejecting protocol
+// violations (bad rsv bits, unmasked client frames, unknown opcodes).
+func (c *Conn) readLoop(ctx context.Context) (header, error) {
+	for {
+		h, err := c.readFrameHeader(ctx)
+		if err != nil {
+			return header{}, err
+		}
+
+		if h.rsv1 && c.readRSV1Illegal(h) || h.rsv2 || h.rsv3 {
+			err := fmt.Errorf("received header with unexpected rsv bits set: %v:%v:%v", h.rsv1, h.rsv2, h.rsv3)
+			c.writeError(StatusProtocolError, err)
+			return header{}, err
+		}
+
+		if !c.client && !h.masked {
+			return header{}, errors.New("received unmasked frame from client")
+		}
+
+		switch h.opcode {
+		case opClose, opPing, opPong:
+			err = c.handleControl(ctx, h)
+			if err != nil {
+				// Pass through CloseErrors when receiving a close frame.
+				if h.opcode == opClose && CloseStatus(err) != -1 {
+					return header{}, err
+				}
+				return header{}, fmt.Errorf("failed to handle control frame %v: %w", h.opcode, err)
+			}
+		case opContinuation, opText, opBinary:
+			return h, nil
+		default:
+			err := fmt.Errorf("received unknown opcode %v", h.opcode)
+			c.writeError(StatusProtocolError, err)
+			return header{}, err
+		}
+	}
+}
+
+// readFrameHeader reads one frame header, bracketing the read with sends
+// on c.readTimeout so the connection's timeout handling can observe when
+// a read is in flight (ctx before, background context after).
+func (c *Conn) readFrameHeader(ctx context.Context) (header, error) {
+	select {
+	case <-c.closed:
+		return header{}, net.ErrClosed
+	case c.readTimeout <- ctx:
+	}
+
+	h, err := readFrameHeader(c.br, c.readHeaderBuf[:])
+	if err != nil {
+		select {
+		case <-c.closed:
+			return header{}, net.ErrClosed
+		case <-ctx.Done():
+			return header{}, ctx.Err()
+		default:
+			return header{}, err
+		}
+	}
+
+	select {
+	case <-c.closed:
+		return header{}, net.ErrClosed
+	case c.readTimeout <- context.Background():
+	}
+
+	return h, nil
+}
+
+// readFramePayload reads exactly len(p) payload bytes, with the same
+// readTimeout bracketing as readFrameHeader.
+func (c *Conn) readFramePayload(ctx context.Context, p []byte) (int, error) {
+	select {
+	case <-c.closed:
+		return 0, net.ErrClosed
+	case c.readTimeout <- ctx:
+	}
+
+	n, err := io.ReadFull(c.br, p)
+	if err != nil {
+		select {
+		case <-c.closed:
+			return n, net.ErrClosed
+		case <-ctx.Done():
+			return n, ctx.Err()
+		default:
+			return n, fmt.Errorf("failed to read frame payload: %w", err)
+		}
+	}
+
+	select {
+	case <-c.closed:
+		return n, net.ErrClosed
+	case c.readTimeout <- context.Background():
+	}
+
+	return n, err
+}
+
+// handleControl validates and responds to a control frame: it pongs a
+// ping, completes a matching in-flight ping on pong, and on close echoes
+// the close frame and tears down the connection, returning the received
+// CloseError wrapped in the returned error.
+func (c *Conn) handleControl(ctx context.Context, h header) (err error) {
+	if h.payloadLength < 0 || h.payloadLength > maxControlPayload {
+		err := fmt.Errorf("received control frame payload with invalid length: %d", h.payloadLength)
+		c.writeError(StatusProtocolError, err)
+		return err
+	}
+
+	if !h.fin {
+		err := errors.New("received fragmented control frame")
+		c.writeError(StatusProtocolError, err)
+		return err
+	}
+
+	ctx, cancel := context.WithTimeout(ctx, time.Second*5)
+	defer cancel()
+
+	b := c.readControlBuf[:h.payloadLength]
+	_, err = c.readFramePayload(ctx, b)
+	if err != nil {
+		return err
+	}
+
+	if h.masked {
+		mask(b, h.maskKey)
+	}
+
+	switch h.opcode {
+	case opPing:
+		return c.writeControl(ctx, opPong, b)
+	case opPong:
+		c.activePingsMu.Lock()
+		pong, ok := c.activePings[string(b)]
+		c.activePingsMu.Unlock()
+		if ok {
+			// Non-blocking send: the waiter may have given up already.
+			select {
+			case pong <- struct{}{}:
+			default:
+			}
+		}
+		return nil
+	}
+
+	// opClose
+
+	ce, err := parseClosePayload(b)
+	if err != nil {
+		err = fmt.Errorf("received invalid close payload: %w", err)
+		c.writeError(StatusProtocolError, err)
+		return err
+	}
+
+	err = fmt.Errorf("received close frame: %w", ce)
+	c.writeClose(ce.Code, ce.Reason)
+	// NOTE(review): readMu is unlocked before c.close(); presumably close
+	// re-acquires it — confirm against (*Conn).close.
+	c.readMu.unlock()
+	c.close()
+	return err
+}
+
+// reader implements Reader. It acquires the read lock, rejects a new read
+// while a previous message is unfinished, and hands back a reader for the
+// next data message.
+func (c *Conn) reader(ctx context.Context) (_ MessageType, _ io.Reader, err error) {
+	defer errd.Wrap(&err, "failed to get reader")
+
+	err = c.readMu.lock(ctx)
+	if err != nil {
+		return 0, nil, err
+	}
+	defer c.readMu.unlock()
+
+	if !c.msgReader.fin {
+		return 0, nil, errors.New("previous message not read to completion")
+	}
+
+	h, err := c.readLoop(ctx)
+	if err != nil {
+		return 0, nil, err
+	}
+
+	if h.opcode == opContinuation {
+		err := errors.New("received continuation frame without text or binary frame")
+		c.writeError(StatusProtocolError, err)
+		return 0, nil, err
+	}
+
+	c.msgReader.reset(ctx, h)
+
+	return MessageType(h.opcode), c.msgReader, nil
+}
+
+// msgReader reads a single WebSocket message across its frames, handling
+// optional flate decompression and the per-message read limit.
+type msgReader struct {
+	c *Conn
+
+	ctx         context.Context
+	flate       bool
+	flateReader io.Reader
+	flateBufio  *bufio.Reader
+	flateTail   strings.Reader
+	limitReader *limitReader
+	dict        *slidingWindow
+
+	// Per-frame state, refreshed by setFrame.
+	fin           bool
+	payloadLength int64
+	maskKey       uint32
+
+	// util.ReaderFunc(mr.Read) to avoid continuous allocations.
+	readFunc util.ReaderFunc
+}
+
+// reset prepares the msgReader for a new message beginning with header h.
+func (mr *msgReader) reset(ctx context.Context, h header) {
+	mr.ctx = ctx
+	mr.flate = h.rsv1
+	mr.limitReader.reset(mr.readFunc)
+
+	if mr.flate {
+		mr.resetFlate()
+	}
+
+	mr.setFrame(h)
+}
+
+// setFrame loads per-frame state: fin flag, payload length and mask key.
+func (mr *msgReader) setFrame(h header) {
+	mr.fin = h.fin
+	mr.payloadLength = h.payloadLength
+	mr.maskKey = h.maskKey
+}
+
+// Read implements io.Reader over the current message. It feeds decompressed
+// bytes into the sliding-window dictionary when context takeover is active
+// and converts the end of the final frame into io.EOF.
+func (mr *msgReader) Read(p []byte) (n int, err error) {
+	err = mr.c.readMu.lock(mr.ctx)
+	if err != nil {
+		return 0, fmt.Errorf("failed to read: %w", err)
+	}
+	defer mr.c.readMu.unlock()
+
+	n, err = mr.limitReader.Read(p)
+	if mr.flate && mr.flateContextTakeover() {
+		p = p[:n]
+		mr.dict.write(p)
+	}
+	if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) && mr.fin && mr.flate {
+		mr.putFlateReader()
+		return n, io.EOF
+	}
+	if err != nil {
+		return n, fmt.Errorf("failed to read: %w", err)
+	}
+	return n, nil
+}
+
+// read reads raw frame payload bytes, advancing to the next continuation
+// frame when the current one is exhausted, appending the deflate tail at
+// the end of a compressed message and unmasking on the server side.
+func (mr *msgReader) read(p []byte) (int, error) {
+	for {
+		if mr.payloadLength == 0 {
+			if mr.fin {
+				if mr.flate {
+					return mr.flateTail.Read(p)
+				}
+				return 0, io.EOF
+			}
+
+			h, err := mr.c.readLoop(mr.ctx)
+			if err != nil {
+				return 0, err
+			}
+			if h.opcode != opContinuation {
+				err := errors.New("received new data message without finishing the previous message")
+				mr.c.writeError(StatusProtocolError, err)
+				return 0, err
+			}
+			mr.setFrame(h)
+
+			continue
+		}
+
+		if int64(len(p)) > mr.payloadLength {
+			p = p[:mr.payloadLength]
+		}
+
+		n, err := mr.c.readFramePayload(mr.ctx, p)
+		if err != nil {
+			return n, err
+		}
+
+		mr.payloadLength -= int64(n)
+
+		if !mr.c.client {
+			// Server side: unmask, chaining the key across calls.
+			mr.maskKey = mask(p, mr.maskKey)
+		}
+
+		return n, nil
+	}
+}
+
+// limitReader enforces the per-message read limit; when the limit is hit
+// the connection is closed with StatusMessageTooBig.
+type limitReader struct {
+	c     *Conn
+	r     io.Reader
+	limit xsync.Int64
+	n     int64 // bytes remaining; a negative limit disables the check
+}
+
+// newLimitReader returns a limitReader over r with the given byte limit.
+func newLimitReader(c *Conn, r io.Reader, limit int64) *limitReader {
+	lr := &limitReader{
+		c: c,
+	}
+	lr.limit.Store(limit)
+	lr.reset(r)
+	return lr
+}
+
+// reset points the limitReader at r and restores the full limit.
+func (lr *limitReader) reset(r io.Reader) {
+	lr.n = lr.limit.Load()
+	lr.r = r
+}
+
+// Read reads up to the remaining limit; a negative limit disables limiting
+// entirely, and exhausting the limit closes the connection.
+func (lr *limitReader) Read(p []byte) (int, error) {
+	if lr.n < 0 {
+		return lr.r.Read(p)
+	}
+
+	if lr.n == 0 {
+		err := fmt.Errorf("read limited at %v bytes", lr.limit.Load())
+		lr.c.writeError(StatusMessageTooBig, err)
+		return 0, err
+	}
+
+	if int64(len(p)) > lr.n {
+		p = p[:lr.n]
+	}
+	n, err := lr.r.Read(p)
+	lr.n -= int64(n)
+	if lr.n < 0 {
+		lr.n = 0
+	}
+	return n, err
+}
diff --git a/vendor/github.com/coder/websocket/stringer.go b/vendor/github.com/coder/websocket/stringer.go
new file mode 100644
index 0000000..5a66ba2
--- /dev/null
+++ b/vendor/github.com/coder/websocket/stringer.go
@@ -0,0 +1,91 @@
+// Code generated by "stringer -type=opcode,MessageType,StatusCode -output=stringer.go"; DO NOT EDIT.
+
+package websocket
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[opContinuation-0]
+	_ = x[opText-1]
+	_ = x[opBinary-2]
+	_ = x[opClose-8]
+	_ = x[opPing-9]
+	_ = x[opPong-10]
+}
+
+const (
+	_opcode_name_0 = "opContinuationopTextopBinary"
+	_opcode_name_1 = "opCloseopPingopPong"
+)
+
+var (
+	_opcode_index_0 = [...]uint8{0, 14, 20, 28}
+	_opcode_index_1 = [...]uint8{0, 7, 13, 19}
+)
+
+func (i opcode) String() string {
+	switch {
+	case 0 <= i && i <= 2:
+		return _opcode_name_0[_opcode_index_0[i]:_opcode_index_0[i+1]]
+	case 8 <= i && i <= 10:
+		i -= 8
+		return _opcode_name_1[_opcode_index_1[i]:_opcode_index_1[i+1]]
+	default:
+		return "opcode(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[MessageText-1]
+	_ = x[MessageBinary-2]
+}
+
+const _MessageType_name = "MessageTextMessageBinary"
+
+var _MessageType_index = [...]uint8{0, 11, 24}
+
+func (i MessageType) String() string {
+	i -= 1
+	if i < 0 || i >= MessageType(len(_MessageType_index)-1) {
+		return "MessageType(" + strconv.FormatInt(int64(i+1), 10) + ")"
+	}
+	return _MessageType_name[_MessageType_index[i]:_MessageType_index[i+1]]
+}
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[StatusNormalClosure-1000]
+	_ = x[StatusGoingAway-1001]
+	_ = x[StatusProtocolError-1002]
+	_ = x[StatusUnsupportedData-1003]
+	_ = x[statusReserved-1004]
+	_ = x[StatusNoStatusRcvd-1005]
+	_ = x[StatusAbnormalClosure-1006]
+	_ = x[StatusInvalidFramePayloadData-1007]
+	_ = x[StatusPolicyViolation-1008]
+	_ = x[StatusMessageTooBig-1009]
+	_ = x[StatusMandatoryExtension-1010]
+	_ = x[StatusInternalError-1011]
+	_ = x[StatusServiceRestart-1012]
+	_ = x[StatusTryAgainLater-1013]
+	_ = x[StatusBadGateway-1014]
+	_ = x[StatusTLSHandshake-1015]
+}
+
+const _StatusCode_name = "StatusNormalClosureStatusGoingAwayStatusProtocolErrorStatusUnsupportedDatastatusReservedStatusNoStatusRcvdStatusAbnormalClosureStatusInvalidFramePayloadDataStatusPolicyViolationStatusMessageTooBigStatusMandatoryExtensionStatusInternalErrorStatusServiceRestartStatusTryAgainLaterStatusBadGatewayStatusTLSHandshake"
+
+var _StatusCode_index = [...]uint16{0, 19, 34, 53, 74, 88, 106, 127, 156, 177, 196, 220, 239, 259, 278, 294, 312}
+
+func (i StatusCode) String() string {
+	i -= 1000
+	if i < 0 || i >= StatusCode(len(_StatusCode_index)-1) {
+		return "StatusCode(" + strconv.FormatInt(int64(i+1000), 10) + ")"
+	}
+	return _StatusCode_name[_StatusCode_index[i]:_StatusCode_index[i+1]]
+}
diff --git a/vendor/github.com/coder/websocket/write.go b/vendor/github.com/coder/websocket/write.go
new file mode 100644
index 0000000..e294a68
--- /dev/null
+++ b/vendor/github.com/coder/websocket/write.go
@@ -0,0 +1,376 @@
+//go:build !js
+// +build !js
+
+package websocket
+
+import (
+	"bufio"
+	"context"
+	"crypto/rand"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"time"
+
+	"compress/flate"
+
+	"github.com/coder/websocket/internal/errd"
+	"github.com/coder/websocket/internal/util"
+)
+
+// Writer returns a writer bounded by the context that will write
+// a WebSocket message of type dataType to the connection.
+//
+// You must close the writer once you have written the entire message.
+//
+// Only one writer can be open at a time, multiple calls will block until the previous writer
+// is closed.
+func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+	// c.writer locks the message mutex; Close on the returned writer
+	// releases it (see msgWriter.reset and msgWriter.Close).
+	w, err := c.writer(ctx, typ)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get writer: %w", err)
+	}
+	return w, nil
+}
+
+// Write writes a message to the connection.
+//
+// See the Writer method if you want to stream a message.
+//
+// If compression is disabled or the compression threshold is not met, then it
+// will write the message in a single frame.
+func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error {
+	// The byte count from write is discarded; callers see success or failure.
+	_, err := c.write(ctx, typ, p)
+	if err != nil {
+		return fmt.Errorf("failed to write msg: %w", err)
+	}
+	return nil
+}
+
+// msgWriter writes one message, possibly compressed, frame by frame.
+// mu is held for the whole message (locked in reset, released in Close);
+// writeMu serializes individual Write/Close calls.
+type msgWriter struct {
+	c *Conn
+
+	mu      *mu
+	writeMu *mu
+	closed  bool
+
+	ctx    context.Context
+	opcode opcode
+	flate  bool
+
+	trimWriter  *trimLastFourBytesWriter
+	flateWriter *flate.Writer
+}
+
+// newMsgWriter returns a msgWriter for c with fresh message/write mutexes.
+func newMsgWriter(c *Conn) *msgWriter {
+	mw := &msgWriter{
+		c:       c,
+		mu:      newMu(c),
+		writeMu: newMu(c),
+	}
+	return mw
+}
+
+// ensureFlate lazily sets up the tail-trimming writer and a pooled
+// flate.Writer, then marks the current message as compressed.
+func (mw *msgWriter) ensureFlate() {
+	if mw.trimWriter == nil {
+		mw.trimWriter = &trimLastFourBytesWriter{
+			w: util.WriterFunc(mw.write),
+		}
+	}
+
+	if mw.flateWriter == nil {
+		mw.flateWriter = getFlateWriter(mw.trimWriter)
+	}
+	mw.flate = true
+}
+
+// flateContextTakeover reports whether the negotiated compression options
+// retain the sliding window between messages on the writing side.
+func (mw *msgWriter) flateContextTakeover() bool {
+	if mw.c.client {
+		return !mw.c.copts.clientNoContextTakeover
+	}
+	return !mw.c.copts.serverNoContextTakeover
+}
+
+// writer resets the shared msgWriter for a new message of type typ,
+// acquiring the message lock in the process.
+func (c *Conn) writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+	err := c.msgWriter.reset(ctx, typ)
+	if err != nil {
+		return nil, err
+	}
+	return c.msgWriter, nil
+}
+
+// write writes p as one message. With compression disabled it takes a
+// fast path that emits a single finished frame directly.
+func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) (int, error) {
+	mw, err := c.writer(ctx, typ)
+	if err != nil {
+		return 0, err
+	}
+
+	if !c.flate() {
+		defer c.msgWriter.mu.unlock()
+		return c.writeFrame(ctx, true, false, c.msgWriter.opcode, p)
+	}
+
+	n, err := mw.Write(p)
+	if err != nil {
+		return n, err
+	}
+
+	err = mw.Close()
+	return n, err
+}
+
+// reset readies the writer for a new message; it blocks until the
+// previous message (if any) has been closed and releases its lock.
+func (mw *msgWriter) reset(ctx context.Context, typ MessageType) error {
+	err := mw.mu.lock(ctx)
+	if err != nil {
+		return err
+	}
+
+	mw.ctx = ctx
+	mw.opcode = opcode(typ)
+	mw.flate = false
+	mw.closed = false
+
+	mw.trimWriter.reset()
+
+	return nil
+}
+
+// putFlateWriter returns the flate writer to its pool, if one is held.
+func (mw *msgWriter) putFlateWriter() {
+	if mw.flateWriter != nil {
+		putFlateWriter(mw.flateWriter)
+		mw.flateWriter = nil
+	}
+}
+
+// Write writes the given bytes to the WebSocket connection as part of the
+// current message, switching to compressed frames once the first chunk
+// crosses the flate threshold.
+func (mw *msgWriter) Write(p []byte) (_ int, err error) {
+	err = mw.writeMu.lock(mw.ctx)
+	if err != nil {
+		return 0, fmt.Errorf("failed to write: %w", err)
+	}
+	defer mw.writeMu.unlock()
+
+	if mw.closed {
+		return 0, errors.New("cannot use closed writer")
+	}
+
+	defer func() {
+		if err != nil {
+			err = fmt.Errorf("failed to write: %w", err)
+		}
+	}()
+
+	if mw.c.flate() {
+		// Only enables flate if the length crosses the
+		// threshold on the first frame
+		if mw.opcode != opContinuation && len(p) >= mw.c.flateThreshold {
+			mw.ensureFlate()
+		}
+	}
+
+	if mw.flate {
+		return mw.flateWriter.Write(p)
+	}
+
+	return mw.write(p)
+}
+
+// write emits one unfinished data frame and downgrades the opcode to
+// opContinuation for subsequent frames of the same message.
+func (mw *msgWriter) write(p []byte) (int, error) {
+	n, err := mw.c.writeFrame(mw.ctx, false, mw.flate, mw.opcode, p)
+	if err != nil {
+		return n, fmt.Errorf("failed to write data frame: %w", err)
+	}
+	mw.opcode = opContinuation
+	return n, nil
+}
+
+// Close flushes the frame to the connection.
+// It writes the final (fin) frame, releases the pooled flate writer when
+// context takeover is off, and unlocks the message mutex taken in reset.
+func (mw *msgWriter) Close() (err error) {
+	defer errd.Wrap(&err, "failed to close writer")
+
+	err = mw.writeMu.lock(mw.ctx)
+	if err != nil {
+		return err
+	}
+	defer mw.writeMu.unlock()
+
+	if mw.closed {
+		return errors.New("writer already closed")
+	}
+	mw.closed = true
+
+	if mw.flate {
+		err = mw.flateWriter.Flush()
+		if err != nil {
+			return fmt.Errorf("failed to flush flate: %w", err)
+		}
+	}
+
+	_, err = mw.c.writeFrame(mw.ctx, true, mw.flate, mw.opcode, nil)
+	if err != nil {
+		return fmt.Errorf("failed to write fin frame: %w", err)
+	}
+
+	if mw.flate && !mw.flateContextTakeover() {
+		mw.putFlateWriter()
+	}
+	mw.mu.unlock()
+	return nil
+}
+
+// close releases writer resources during connection teardown.
+func (mw *msgWriter) close() {
+	if mw.c.client {
+		mw.c.writeFrameMu.forceLock()
+		putBufioWriter(mw.c.bw)
+	}
+
+	mw.writeMu.forceLock()
+	mw.putFlateWriter()
+}
+
+// writeControl writes a single finished control frame, bounded by a
+// 5 second timeout.
+func (c *Conn) writeControl(ctx context.Context, opcode opcode, p []byte) error {
+	ctx, cancel := context.WithTimeout(ctx, time.Second*5)
+	defer cancel()
+
+	_, err := c.writeFrame(ctx, true, false, opcode, p)
+	if err != nil {
+		return fmt.Errorf("failed to write control frame %v: %w", opcode, err)
+	}
+	return nil
+}
+
+// writeFrame handles all writes to the connection.
+// It serializes frame writes, signals the write timeout goroutine for the
+// duration of the write, generates a masking key for client frames and
+// flushes the buffered writer on fin.
+func (c *Conn) writeFrame(ctx context.Context, fin bool, flate bool, opcode opcode, p []byte) (_ int, err error) {
+	err = c.writeFrameMu.lock(ctx)
+	if err != nil {
+		return 0, err
+	}
+	defer c.writeFrameMu.unlock()
+
+	select {
+	case <-c.closed:
+		return 0, net.ErrClosed
+	case c.writeTimeout <- ctx:
+	}
+
+	defer func() {
+		if err != nil {
+			select {
+			case <-c.closed:
+				err = net.ErrClosed
+			case <-ctx.Done():
+				err = ctx.Err()
+			default:
+			}
+			err = fmt.Errorf("failed to write frame: %w", err)
+		}
+	}()
+
+	c.writeHeader.fin = fin
+	c.writeHeader.opcode = opcode
+	c.writeHeader.payloadLength = int64(len(p))
+
+	if c.client {
+		c.writeHeader.masked = true
+		_, err = io.ReadFull(rand.Reader, c.writeHeaderBuf[:4])
+		if err != nil {
+			return 0, fmt.Errorf("failed to generate masking key: %w", err)
+		}
+		c.writeHeader.maskKey = binary.LittleEndian.Uint32(c.writeHeaderBuf[:])
+	}
+
+	// rsv1 marks a compressed message and may only be set on the first
+	// data frame of the message.
+	c.writeHeader.rsv1 = false
+	if flate && (opcode == opText || opcode == opBinary) {
+		c.writeHeader.rsv1 = true
+	}
+
+	err = writeFrameHeader(c.writeHeader, c.bw, c.writeHeaderBuf[:])
+	if err != nil {
+		return 0, err
+	}
+
+	n, err := c.writeFramePayload(p)
+	if err != nil {
+		return n, err
+	}
+
+	if c.writeHeader.fin {
+		err = c.bw.Flush()
+		if err != nil {
+			return n, fmt.Errorf("failed to flush: %w", err)
+		}
+	}
+
+	select {
+	case <-c.closed:
+		// If the connection closed while writing, a close frame still
+		// counts as successfully written.
+		if opcode == opClose {
+			return n, nil
+		}
+		return n, net.ErrClosed
+	case c.writeTimeout <- context.Background():
+	}
+
+	return n, nil
+}
+
+// writeFramePayload writes p through the buffered writer, masking the
+// staged bytes in place inside the writer's buffer as they are copied.
+func (c *Conn) writeFramePayload(p []byte) (n int, err error) {
+	defer errd.Wrap(&err, "failed to write frame payload")
+
+	if !c.writeHeader.masked {
+		return c.bw.Write(p)
+	}
+
+	maskKey := c.writeHeader.maskKey
+	for len(p) > 0 {
+		// If the buffer is full, we need to flush.
+		if c.bw.Available() == 0 {
+			err = c.bw.Flush()
+			if err != nil {
+				return n, err
+			}
+		}
+
+		// Start of next write in the buffer.
+		i := c.bw.Buffered()
+
+		j := len(p)
+		if j > c.bw.Available() {
+			j = c.bw.Available()
+		}
+
+		_, err := c.bw.Write(p[:j])
+		if err != nil {
+			return n, err
+		}
+
+		// Mask the bytes just staged, chaining the key across chunks.
+		maskKey = mask(c.writeBuf[i:c.bw.Buffered()], maskKey)
+
+		p = p[j:]
+		n += j
+	}
+
+	return n, nil
+}
+
+// extractBufioWriterBuf grabs the []byte backing a *bufio.Writer
+// and returns it.
+//
+// It works by pointing the writer at a capturing WriterFunc and forcing a
+// flush of a single byte, which hands the internal buffer to the func.
+func extractBufioWriterBuf(bw *bufio.Writer, w io.Writer) []byte {
+	var writeBuf []byte
+	bw.Reset(util.WriterFunc(func(p2 []byte) (int, error) {
+		writeBuf = p2[:cap(p2)]
+		return len(p2), nil
+	}))
+
+	bw.WriteByte(0)
+	bw.Flush()
+
+	// Restore the writer's real destination.
+	bw.Reset(w)
+
+	return writeBuf
+}
+
+// writeError closes the connection with the given status code, using the
+// error text as the close reason.
+func (c *Conn) writeError(code StatusCode, err error) {
+	c.writeClose(code, err.Error())
+}
diff --git a/vendor/github.com/coder/websocket/ws_js.go b/vendor/github.com/coder/websocket/ws_js.go
new file mode 100644
index 0000000..a8de0c6
--- /dev/null
+++ b/vendor/github.com/coder/websocket/ws_js.go
@@ -0,0 +1,598 @@
+package websocket // import "github.com/coder/websocket"
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"syscall/js"
+
+	"github.com/coder/websocket/internal/bpool"
+	"github.com/coder/websocket/internal/wsjs"
+	"github.com/coder/websocket/internal/xsync"
+)
+
+// opcode represents a WebSocket opcode.
+type opcode int
+
+// https://tools.ietf.org/html/rfc6455#section-11.8.
+const (
+	opContinuation opcode = iota
+	opText
+	opBinary
+	// 3 - 7 are reserved for further non-control frames.
+	_
+	_
+	_
+	_
+	_
+	opClose
+	opPing
+	opPong
+	// 11-16 are reserved for further control frames.
+)
+
+// Conn provides a wrapper around the browser WebSocket API.
+type Conn struct {
+	noCopy noCopy
+	ws     wsjs.WebSocket
+
+	// read limit for a message in bytes.
+	msgReadLimit xsync.Int64
+
+	closeReadMu  sync.Mutex
+	closeReadCtx context.Context
+
+	closingMu     sync.Mutex
+	closeOnce     sync.Once
+	closed        chan struct{}
+	closeErrOnce  sync.Once
+	closeErr      error
+	closeWasClean bool
+
+	releaseOnClose   func()
+	releaseOnError   func()
+	releaseOnMessage func()
+
+	readSignal chan struct{}
+	readBufMu  sync.Mutex
+	readBuf    []wsjs.MessageEvent
+}
+
+func (c *Conn) close(err error, wasClean bool) {
+	c.closeOnce.Do(func() {
+		runtime.SetFinalizer(c, nil)
+
+		if !wasClean {
+			err = fmt.Errorf("unclean connection close: %w", err)
+		}
+		c.setCloseErr(err)
+		c.closeWasClean = wasClean
+		close(c.closed)
+	})
+}
+
+func (c *Conn) init() {
+	c.closed = make(chan struct{})
+	c.readSignal = make(chan struct{}, 1)
+
+	c.msgReadLimit.Store(32768)
+
+	c.releaseOnClose = c.ws.OnClose(func(e wsjs.CloseEvent) {
+		err := CloseError{
+			Code:   StatusCode(e.Code),
+			Reason: e.Reason,
+		}
+		// We do not know if we sent or received this close as
+		// it's possible the browser triggered it without us
+		// explicitly sending it.
+		c.close(err, e.WasClean)
+
+		c.releaseOnClose()
+		c.releaseOnError()
+		c.releaseOnMessage()
+	})
+
+	c.releaseOnError = c.ws.OnError(func(v js.Value) {
+		c.setCloseErr(errors.New(v.Get("message").String()))
+		c.closeWithInternal()
+	})
+
+	c.releaseOnMessage = c.ws.OnMessage(func(e wsjs.MessageEvent) {
+		c.readBufMu.Lock()
+		defer c.readBufMu.Unlock()
+
+		c.readBuf = append(c.readBuf, e)
+
+		// Lets the read goroutine know there is definitely something in readBuf.
+		select {
+		case c.readSignal <- struct{}{}:
+		default:
+		}
+	})
+
+	runtime.SetFinalizer(c, func(c *Conn) {
+		c.setCloseErr(errors.New("connection garbage collected"))
+		c.closeWithInternal()
+	})
+}
+
+func (c *Conn) closeWithInternal() {
+	c.Close(StatusInternalError, "something went wrong")
+}
+
+// Read attempts to read a message from the connection.
+// The maximum time spent waiting is bounded by the context.
+func (c *Conn) Read(ctx context.Context) (MessageType, []byte, error) {
+	c.closeReadMu.Lock()
+	closedRead := c.closeReadCtx != nil
+	c.closeReadMu.Unlock()
+	if closedRead {
+		return 0, nil, errors.New("WebSocket connection read closed")
+	}
+
+	typ, p, err := c.read(ctx)
+	if err != nil {
+		return 0, nil, fmt.Errorf("failed to read: %w", err)
+	}
+	readLimit := c.msgReadLimit.Load()
+	if readLimit >= 0 && int64(len(p)) > readLimit {
+		err := fmt.Errorf("read limited at %v bytes", c.msgReadLimit.Load())
+		c.Close(StatusMessageTooBig, err.Error())
+		return 0, nil, err
+	}
+	return typ, p, nil
+}
+
+func (c *Conn) read(ctx context.Context) (MessageType, []byte, error) {
+	select {
+	case <-ctx.Done():
+		c.Close(StatusPolicyViolation, "read timed out")
+		return 0, nil, ctx.Err()
+	case <-c.readSignal:
+	case <-c.closed:
+		return 0, nil, net.ErrClosed
+	}
+
+	c.readBufMu.Lock()
+	defer c.readBufMu.Unlock()
+
+	me := c.readBuf[0]
+	// We copy the messages forward and decrease the size
+	// of the slice to avoid reallocating.
+	copy(c.readBuf, c.readBuf[1:])
+	c.readBuf = c.readBuf[:len(c.readBuf)-1]
+
+	if len(c.readBuf) > 0 {
+		// Next time we read, we'll grab the message.
+		select {
+		case c.readSignal <- struct{}{}:
+		default:
+		}
+	}
+
+	switch p := me.Data.(type) {
+	case string:
+		return MessageText, []byte(p), nil
+	case []byte:
+		return MessageBinary, p, nil
+	default:
+		panic("websocket: unexpected data type from wsjs OnMessage: " + reflect.TypeOf(me.Data).String())
+	}
+}
+
+// Ping is mocked out for Wasm.
+func (c *Conn) Ping(ctx context.Context) error {
+	return nil
+}
+
+// Write writes a message of the given type to the connection.
+// Always non blocking.
+func (c *Conn) Write(ctx context.Context, typ MessageType, p []byte) error {
+	err := c.write(ctx, typ, p)
+	if err != nil {
+		// Have to ensure the WebSocket is closed after a write error
+		// to match the Go API. It can only error if the message type
+		// is unexpected or the passed bytes contain invalid UTF-8 for
+		// MessageText.
+		err := fmt.Errorf("failed to write: %w", err)
+		c.setCloseErr(err)
+		c.closeWithInternal()
+		return err
+	}
+	return nil
+}
+
+func (c *Conn) write(ctx context.Context, typ MessageType, p []byte) error {
+	if c.isClosed() {
+		return net.ErrClosed
+	}
+	switch typ {
+	case MessageBinary:
+		return c.ws.SendBytes(p)
+	case MessageText:
+		return c.ws.SendText(string(p))
+	default:
+		return fmt.Errorf("unexpected message type: %v", typ)
+	}
+}
+
+// Close closes the WebSocket with the given code and reason.
+// It will wait until the peer responds with a close frame
+// or the connection is closed.
+// It thus performs the full WebSocket close handshake.
+func (c *Conn) Close(code StatusCode, reason string) error {
+	err := c.exportedClose(code, reason)
+	if err != nil {
+		return fmt.Errorf("failed to close WebSocket: %w", err)
+	}
+	return nil
+}
+
+// CloseNow closes the WebSocket connection without attempting a close handshake.
+// Use when you do not want the overhead of the close handshake.
+//
+// note: No different from Close(StatusGoingAway, "") in WASM as there is no way to close
+// a WebSocket without the close handshake.
+func (c *Conn) CloseNow() error {
+	return c.Close(StatusGoingAway, "")
+}
+
+func (c *Conn) exportedClose(code StatusCode, reason string) error {
+	c.closingMu.Lock()
+	defer c.closingMu.Unlock()
+
+	if c.isClosed() {
+		return net.ErrClosed
+	}
+
+	ce := fmt.Errorf("sent close: %w", CloseError{
+		Code:   code,
+		Reason: reason,
+	})
+
+	c.setCloseErr(ce)
+	err := c.ws.Close(int(code), reason)
+	if err != nil {
+		return err
+	}
+
+	<-c.closed
+	if !c.closeWasClean {
+		return c.closeErr
+	}
+	return nil
+}
+
+// Subprotocol returns the negotiated subprotocol.
+// An empty string means the default protocol.
+func (c *Conn) Subprotocol() string {
+	return c.ws.Subprotocol()
+}
+
+// DialOptions represents the options available to pass to Dial.
+type DialOptions struct {
+	// Subprotocols lists the subprotocols to negotiate with the server.
+	Subprotocols []string
+}
+
+// Dial creates a new WebSocket connection to the given url with the given options.
+// The passed context bounds the maximum time spent waiting for the connection to open.
+// The returned *http.Response is always nil or a mock. It's only in the signature
+// to match the core API.
+func Dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) {
+	c, resp, err := dial(ctx, url, opts)
+	if err != nil {
+		return nil, nil, fmt.Errorf("failed to WebSocket dial %q: %w", url, err)
+	}
+	return c, resp, nil
+}
+
+func dial(ctx context.Context, url string, opts *DialOptions) (*Conn, *http.Response, error) {
+	if opts == nil {
+		opts = &DialOptions{}
+	}
+
+	url = strings.Replace(url, "http://", "ws://", 1)
+	url = strings.Replace(url, "https://", "wss://", 1)
+
+	ws, err := wsjs.New(url, opts.Subprotocols)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	c := &Conn{
+		ws: ws,
+	}
+	c.init()
+
+	opench := make(chan struct{})
+	releaseOpen := ws.OnOpen(func(e js.Value) {
+		close(opench)
+	})
+	defer releaseOpen()
+
+	select {
+	case <-ctx.Done():
+		c.Close(StatusPolicyViolation, "dial timed out")
+		return nil, nil, ctx.Err()
+	case <-opench:
+		return c, &http.Response{
+			StatusCode: http.StatusSwitchingProtocols,
+		}, nil
+	case <-c.closed:
+		return nil, nil, net.ErrClosed
+	}
+}
+
+// Reader attempts to read a message from the connection.
+// The maximum time spent waiting is bounded by the context.
+func (c *Conn) Reader(ctx context.Context) (MessageType, io.Reader, error) {
+	typ, p, err := c.Read(ctx)
+	if err != nil {
+		return 0, nil, err
+	}
+	return typ, bytes.NewReader(p), nil
+}
+
+// Writer returns a writer to write a WebSocket data message to the connection.
+// It buffers the entire message in memory and then sends it when the writer
+// is closed.
+func (c *Conn) Writer(ctx context.Context, typ MessageType) (io.WriteCloser, error) {
+	return &writer{
+		c:   c,
+		ctx: ctx,
+		typ: typ,
+		b:   bpool.Get(),
+	}, nil
+}
+
+type writer struct {
+	closed bool
+
+	c   *Conn
+	ctx context.Context
+	typ MessageType
+
+	b *bytes.Buffer
+}
+
+func (w *writer) Write(p []byte) (int, error) {
+	if w.closed {
+		return 0, errors.New("cannot write to closed writer")
+	}
+	n, err := w.b.Write(p)
+	if err != nil {
+		return n, fmt.Errorf("failed to write message: %w", err)
+	}
+	return n, nil
+}
+
+func (w *writer) Close() error {
+	if w.closed {
+		return errors.New("cannot close closed writer")
+	}
+	w.closed = true
+	defer bpool.Put(w.b)
+
+	err := w.c.Write(w.ctx, w.typ, w.b.Bytes())
+	if err != nil {
+		return fmt.Errorf("failed to close writer: %w", err)
+	}
+	return nil
+}
+
+// CloseRead implements *Conn.CloseRead for wasm.
+func (c *Conn) CloseRead(ctx context.Context) context.Context {
+	c.closeReadMu.Lock()
+	ctx2 := c.closeReadCtx
+	if ctx2 != nil {
+		c.closeReadMu.Unlock()
+		return ctx2
+	}
+	ctx, cancel := context.WithCancel(ctx)
+	c.closeReadCtx = ctx
+	c.closeReadMu.Unlock()
+
+	go func() {
+		defer cancel()
+		defer c.CloseNow()
+		_, _, err := c.read(ctx)
+		if err != nil {
+			c.Close(StatusPolicyViolation, "unexpected data message")
+		}
+	}()
+	return ctx
+}
+
+// SetReadLimit implements *Conn.SetReadLimit for wasm.
+func (c *Conn) SetReadLimit(n int64) {
+	c.msgReadLimit.Store(n)
+}
+
+func (c *Conn) setCloseErr(err error) {
+	c.closeErrOnce.Do(func() {
+		c.closeErr = fmt.Errorf("WebSocket closed: %w", err)
+	})
+}
+
+func (c *Conn) isClosed() bool {
+	select {
+	case <-c.closed:
+		return true
+	default:
+		return false
+	}
+}
+
+// AcceptOptions represents Accept's options.
+type AcceptOptions struct {
+	Subprotocols         []string
+	InsecureSkipVerify   bool
+	OriginPatterns       []string
+	CompressionMode      CompressionMode
+	CompressionThreshold int
+}
+
+// Accept is stubbed out for Wasm.
+func Accept(w http.ResponseWriter, r *http.Request, opts *AcceptOptions) (*Conn, error) {
+	return nil, errors.New("unimplemented")
+}
+
+// StatusCode represents a WebSocket status code.
+// https://tools.ietf.org/html/rfc6455#section-7.4
+type StatusCode int
+
+// https://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+//
+// These are only the status codes defined by the protocol.
+//
+// You can define custom codes in the 3000-4999 range.
+// The 3000-3999 range is reserved for use by libraries, frameworks and applications.
+// The 4000-4999 range is reserved for private use.
+const (
+	StatusNormalClosure   StatusCode = 1000
+	StatusGoingAway       StatusCode = 1001
+	StatusProtocolError   StatusCode = 1002
+	StatusUnsupportedData StatusCode = 1003
+
+	// 1004 is reserved and so unexported.
+	statusReserved StatusCode = 1004
+
+	// StatusNoStatusRcvd cannot be sent in a close message.
+	// It is reserved for when a close message is received without
+	// a status code.
+	StatusNoStatusRcvd StatusCode = 1005
+
+	// StatusAbnormalClosure is exported for use only with Wasm.
+	// In non Wasm Go, the returned error will indicate whether the
+	// connection was closed abnormally.
+	StatusAbnormalClosure StatusCode = 1006
+
+	StatusInvalidFramePayloadData StatusCode = 1007
+	StatusPolicyViolation         StatusCode = 1008
+	StatusMessageTooBig           StatusCode = 1009
+	StatusMandatoryExtension      StatusCode = 1010
+	StatusInternalError           StatusCode = 1011
+	StatusServiceRestart          StatusCode = 1012
+	StatusTryAgainLater           StatusCode = 1013
+	StatusBadGateway              StatusCode = 1014
+
+	// StatusTLSHandshake is only exported for use with Wasm.
+	// In non Wasm Go, the returned error will indicate whether there was
+	// a TLS handshake failure.
+	StatusTLSHandshake StatusCode = 1015
+)
+
+// CloseError is returned when the connection is closed with a status and reason.
+//
+// Use Go 1.13's errors.As to check for this error.
+// Also see the CloseStatus helper.
+type CloseError struct {
+	Code   StatusCode
+	Reason string
+}
+
+func (ce CloseError) Error() string {
+	return fmt.Sprintf("status = %v and reason = %q", ce.Code, ce.Reason)
+}
+
+// CloseStatus is a convenience wrapper around Go 1.13's errors.As to grab
+// the status code from a CloseError.
+//
+// -1 will be returned if the passed error is nil or not a CloseError.
+func CloseStatus(err error) StatusCode {
+	var ce CloseError
+	if errors.As(err, &ce) {
+		return ce.Code
+	}
+	return -1
+}
+
+// CompressionMode represents the modes available to the deflate extension.
+// See https://tools.ietf.org/html/rfc7692
+// Works in all browsers except Safari which does not implement the deflate extension.
+type CompressionMode int
+
+const (
+	// CompressionNoContextTakeover grabs a new flate.Reader and flate.Writer as needed
+	// for every message. This applies to both server and client side.
+	//
+	// This means less efficient compression as the sliding window from previous messages
+	// will not be used but the memory overhead will be lower if the connections
+	// are long lived and seldom used.
+	//
+	// The message will only be compressed if greater than 512 bytes.
+	CompressionNoContextTakeover CompressionMode = iota
+
+	// CompressionContextTakeover uses a flate.Reader and flate.Writer per connection.
+	// This enables reusing the sliding window from previous messages.
+	// As most WebSocket protocols are repetitive, this can be very efficient.
+	// It carries an overhead of 8 kB for every connection compared to CompressionNoContextTakeover.
+	//
+	// If the peer negotiates NoContextTakeover on the client or server side, it will be
+	// used instead as this is required by the RFC.
+	CompressionContextTakeover
+
+	// CompressionDisabled disables the deflate extension.
+	//
+	// Use this if you are using a predominantly binary protocol with very
+	// little duplication in between messages or CPU and memory are more
+	// important than bandwidth.
+	CompressionDisabled
+)
+
+// MessageType represents the type of a WebSocket message.
+// See https://tools.ietf.org/html/rfc6455#section-5.6
+type MessageType int
+
+// MessageType constants.
+const (
+	// MessageText is for UTF-8 encoded text messages like JSON.
+	MessageText MessageType = iota + 1
+	// MessageBinary is for binary messages like protobufs.
+	MessageBinary
+)
+
+type mu struct {
+	c  *Conn
+	ch chan struct{}
+}
+
+func newMu(c *Conn) *mu {
+	return &mu{
+		c:  c,
+		ch: make(chan struct{}, 1),
+	}
+}
+
+func (m *mu) forceLock() {
+	m.ch <- struct{}{}
+}
+
+func (m *mu) tryLock() bool {
+	select {
+	case m.ch <- struct{}{}:
+		return true
+	default:
+		return false
+	}
+}
+
+func (m *mu) unlock() {
+	select {
+	case <-m.ch:
+	default:
+	}
+}
+
+type noCopy struct{}
+
+func (*noCopy) Lock() {}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
new file mode 100644
index 0000000..bc52e96
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -0,0 +1,15 @@
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7929947
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,145 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which makes the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
+
+package spew
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = false
+
+	// ptrSize is the size of a pointer on the current arch.
+	ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+type flag uintptr
+
+var (
+	// flagRO indicates whether the value field of a reflect.Value
+	// is read-only.
+	flagRO flag
+
+	// flagAddr indicates whether the address of the reflect.Value's
+	// value may be taken.
+	flagAddr flag
+)
+
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
+
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+	ro, addr flag
+}{{
+	// From Go 1.4 to 1.5
+	ro:   1 << 5,
+	addr: 1 << 7,
+}, {
+	// Up to Go tip.
+	ro:   1<<5 | 1<<6,
+	addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+	return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data.  It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+		return v
+	}
+	flagFieldPtr := flagField(&v)
+	*flagFieldPtr &^= flagRO
+	*flagFieldPtr |= flagAddr
+	return v
+}
+
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+	field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+	if !ok {
+		panic("reflect.Value has no flag field")
+	}
+	if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+		panic("reflect.Value flag field has changed kind")
+	}
+	type t0 int
+	var t struct {
+		A t0
+		// t0 will have flagEmbedRO set.
+		t0
+		// a will have flagStickyRO set
+		a t0
+	}
+	vA := reflect.ValueOf(t).FieldByName("A")
+	va := reflect.ValueOf(t).FieldByName("a")
+	vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+	// Infer flagRO from the difference between the flags
+	// for the (otherwise identical) fields in t.
+	flagPublic := *flagField(&vA)
+	flagWithRO := *flagField(&va) | *flagField(&vt0)
+	flagRO = flagPublic ^ flagWithRO
+
+	// Infer flagAddr from the difference between a value
+	// taken from a pointer and not.
+	vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+	flagNoPtr := *flagField(&vA)
+	flagPtr := *flagField(&vPtrA)
+	flagAddr = flagNoPtr ^ flagPtr
+
+	// Check that the inferred flags tally with one of the known versions.
+	for _, f := range okFlags {
+		if flagRO == f.ro && flagAddr == f.addr {
+			return
+		}
+	}
+	panic("reflect.Value read-only flag has changed semantics")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..205c28d
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line.  The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe !go1.4
+
+package spew
+
+import "reflect"
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data.  However, doing this relies on access to
+// the unsafe package.  This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead.  This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface.  However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules.  We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) {
+	if val {
+		w.Write(trueBytes)
+	} else {
+		w.Write(falseBytes)
+	}
+}
+
+// printInt outputs a signed integer value to Writer w.
+func printInt(w io.Writer, val int64, base int) {
+	w.Write([]byte(strconv.FormatInt(val, base)))
+}
+
+// printUint outputs an unsigned integer value to Writer w.
+func printUint(w io.Writer, val uint64, base int) {
+	w.Write([]byte(strconv.FormatUint(val, base)))
+}
+
+// printFloat outputs a floating point value using the specified precision,
+// which is expected to be 32 or 64bit, to Writer w.
+func printFloat(w io.Writer, val float64, precision int) {
+	w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
+}
+
+// printComplex outputs a complex value using the specified float precision
+// for the real and imaginary parts to Writer w.
+func printComplex(w io.Writer, c complex128, floatPrecision int) {
+	r := real(c)
+	w.Write(openParenBytes)
+	w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
+	i := imag(c)
+	if i >= 0 {
+		w.Write(plusBytes)
+	}
+	w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
+	w.Write(iBytes)
+	w.Write(closeParenBytes)
+}
+
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
+// prefix to Writer w.
+func printHexPtr(w io.Writer, p uintptr) {
+	// Null pointer.
+	num := uint64(p)
+	if num == 0 {
+		w.Write(nilAngleBytes)
+		return
+	}
+
+	// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
+	buf := make([]byte, 18)
+
+	// It's simpler to construct the hex string right to left.
+	base := uint64(16)
+	i := len(buf) - 1
+	for num >= base {
+		buf[i] = hexDigits[num%base]
+		num /= base
+		i--
+	}
+	buf[i] = hexDigits[num]
+
+	// Add '0x' prefix.
+	i--
+	buf[i] = 'x'
+	i--
+	buf[i] = '0'
+
+	// Strip unused leading bytes.
+	buf = buf[i:]
+	w.Write(buf)
+}
+
+// valuesSorter implements sort.Interface to allow a slice of reflect.Value
+// elements to be sorted.
+type valuesSorter struct {
+	values  []reflect.Value
+	strings []string // either nil or same len as values
+	cs      *ConfigState
+}
+
+// newValuesSorter initializes a valuesSorter instance, which holds a set of
+// surrogate keys on which the data should be sorted.  It uses flags in
+// ConfigState to decide if and how to populate those surrogate keys.
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
+	vs := &valuesSorter{values: values, cs: cs}
+	if canSortSimply(vs.values[0].Kind()) {
+		return vs
+	}
+	if !cs.DisableMethods {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			b := bytes.Buffer{}
+			if !handleMethods(cs, &b, vs.values[i]) {
+				vs.strings = nil
+				break
+			}
+			vs.strings[i] = b.String()
+		}
+	}
+	if vs.strings == nil && cs.SpewKeys {
+		vs.strings = make([]string, len(values))
+		for i := range vs.values {
+			vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
+		}
+	}
+	return vs
+}
+
+// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
+// directly, or whether it should be considered for sorting by surrogate keys
+// (if the ConfigState allows it).
+func canSortSimply(kind reflect.Kind) bool {
+	// This switch parallels valueSortLess, except for the default case.
+	switch kind {
+	case reflect.Bool:
+		return true
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return true
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return true
+	case reflect.Float32, reflect.Float64:
+		return true
+	case reflect.String:
+		return true
+	case reflect.Uintptr:
+		return true
+	case reflect.Array:
+		return true
+	}
+	return false
+}
+
+// Len returns the number of values in the slice.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Len() int {
+	return len(s.values)
+}
+
+// Swap swaps the values at the passed indices.  It is part of the
+// sort.Interface implementation.
+func (s *valuesSorter) Swap(i, j int) {
+	s.values[i], s.values[j] = s.values[j], s.values[i]
+	if s.strings != nil {
+		s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
+	}
+}
+
+// valueSortLess returns whether the first value should sort before the second
+// value.  It is used by valuesSorter.Less as part of the sort.Interface
+// implementation.
+func valueSortLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		return a.Int() < b.Int()
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		return a.Uint() < b.Uint()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.String:
+		return a.String() < b.String()
+	case reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Array:
+		// Compare the contents of both arrays.
+		l := a.Len()
+		for i := 0; i < l; i++ {
+			av := a.Index(i)
+			bv := b.Index(i)
+			if av.Interface() == bv.Interface() {
+				continue
+			}
+			return valueSortLess(av, bv)
+		}
+	}
+	return a.String() < b.String()
+}
+
+// Less returns whether the value at index i should sort before the
+// value at index j.  It is part of the sort.Interface implementation.
+func (s *valuesSorter) Less(i, j int) bool {
+	if s.strings == nil {
+		return valueSortLess(s.values[i], s.values[j])
+	}
+	return s.strings[i] < s.strings[j]
+}
+
+// sortValues is a sort function that handles both native types and any type that
+// can be converted to error or Stringer.  Other inputs are sorted according to
+// their Value.String() value to ensure display stability.
+func sortValues(values []reflect.Value, cs *ConfigState) {
+	if len(values) == 0 {
+		return
+	}
+	sort.Sort(newValuesSorter(values, cs))
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go
new file mode 100644
index 0000000..2e3d22f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/config.go
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"os"
+)
+
+// ConfigState houses the configuration options used by spew to format and
+// display values.  There is a global instance, Config, that is used to control
+// all top-level Formatter and Dump functionality.  Each ConfigState instance
+// provides methods equivalent to the top-level functions.
+//
+// The zero value for ConfigState provides no indentation.  You would typically
+// want to set it to a space or a tab.
+//
+// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
+// with default settings.  See the documentation of NewDefaultConfig for default
+// values.
+type ConfigState struct {
+	// Indent specifies the string to use for each indentation level.  The
+	// global config instance that all top-level functions use set this to a
+	// single space by default.  If you would like more indentation, you might
+	// set this to a tab with "\t" or perhaps two spaces with "  ".
+	Indent string
+
+	// MaxDepth controls the maximum number of levels to descend into nested
+	// data structures.  The default, 0, means there is no limit.
+	//
+	// NOTE: Circular data structures are properly detected, so it is not
+	// necessary to set this value unless you specifically want to limit deeply
+	// nested data structures.
+	MaxDepth int
+
+	// DisableMethods specifies whether or not error and Stringer interfaces are
+	// invoked for types that implement them.
+	DisableMethods bool
+
+	// DisablePointerMethods specifies whether or not to check for and invoke
+	// error and Stringer interfaces on types which only accept a pointer
+	// receiver when the current type is not a pointer.
+	//
+	// NOTE: This might be an unsafe action since calling one of these methods
+	// with a pointer receiver could technically mutate the value, however,
+	// in practice, types which choose to satisfy an error or Stringer
+	// interface with a pointer receiver should not be mutating their state
+	// inside these interface methods.  As a result, this option relies on
+	// access to the unsafe package, so it will not have any effect when
+	// running in environments without access to the unsafe package such as
+	// Google App Engine or with the "safe" build tag specified.
+	DisablePointerMethods bool
+
+	// DisablePointerAddresses specifies whether to disable the printing of
+	// pointer addresses. This is useful when diffing data structures in tests.
+	DisablePointerAddresses bool
+
+	// DisableCapacities specifies whether to disable the printing of capacities
+	// for arrays, slices, maps and channels. This is useful when diffing
+	// data structures in tests.
+	DisableCapacities bool
+
+	// ContinueOnMethod specifies whether or not recursion should continue once
+	// a custom error or Stringer interface is invoked.  The default, false,
+	// means it will print the results of invoking the custom error or Stringer
+	// interface and return immediately instead of continuing to recurse into
+	// the internals of the data type.
+	//
+	// NOTE: This flag does not have any effect if method invocation is disabled
+	// via the DisableMethods or DisablePointerMethods options.
+	ContinueOnMethod bool
+
+	// SortKeys specifies map keys should be sorted before being printed. Use
+	// this to have a more deterministic, diffable output.  Note that only
+	// native types (bool, int, uint, floats, uintptr and string) and types
+	// that support the error or Stringer interfaces (if methods are
+	// enabled) are supported, with other types sorted according to the
+	// reflect.Value.String() output which guarantees display stability.
+	SortKeys bool
+
+	// SpewKeys specifies that, as a last resort attempt, map keys should
+	// be spewed to strings and sorted by those strings.  This is only
+	// considered if SortKeys is true.
+	SpewKeys bool
+}
+
+// Config is the active configuration of the top-level functions.
+// The configuration can be changed by modifying the contents of spew.Config.
+var Config = ConfigState{Indent: " "}
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the formatted string as a value that satisfies error.  See NewFormatter
+// for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
+	return fmt.Errorf(format, c.convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprint(w, c.convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	return fmt.Fprintf(w, format, c.convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	return fmt.Fprintln(w, c.convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
+	return fmt.Print(c.convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter.  It returns
+// the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Print.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by modifying the public members
+of c.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func (c *ConfigState) Dump(a ...interface{}) {
+	fdump(c, os.Stdout, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func (c *ConfigState) Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(c, &buf, a...)
+	return buf.String()
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument converted to a spew Formatter interface using
+// the ConfigState associated with s.
+func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, len(args))
+	for index, arg := range args {
+		formatters[index] = newFormatter(c, arg)
+	}
+	return formatters
+}
+
+// NewDefaultConfig returns a ConfigState with the following default settings.
+//
+// 	Indent: " "
+// 	MaxDepth: 0
+// 	DisableMethods: false
+// 	DisablePointerMethods: false
+// 	DisablePointerAddresses: false
+// 	DisableCapacities: false
+// 	ContinueOnMethod: false
+// 	SortKeys: false
+// 	SpewKeys: false
+func NewDefaultConfig() *ConfigState {
+	return &ConfigState{Indent: " "}
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go
new file mode 100644
index 0000000..aacaac6
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/doc.go
@@ -0,0 +1,211 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+Package spew implements a deep pretty printer for Go data structures to aid in
+debugging.
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types are as follows:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output (only when using
+	  Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+	* Dump style which prints with newlines, customizable indentation,
+	  and additional debug information such as types and all pointer addresses
+	  used to indirect to the final value
+	* A custom Formatter interface that integrates cleanly with the standard fmt
+	  package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+	  similar to the default %v while providing the additional functionality
+	  outlined above and passing unsupported format verbs such as %x and %q
+	  along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew.  See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information use Dump, Fdump, or Sdump:
+	spew.Dump(myVar1, myVar2, ...)
+	spew.Fdump(someWriter, myVar1, myVar2, ...)
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type.  For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions.  This allows concurrent configuration
+options.  See the ConfigState documentation for more details.
+
+The following configuration options are available:
+	* Indent
+		String to use for each indentation level for Dump functions.
+		It is a single space by default.  A popular alternative is "\t".
+
+	* MaxDepth
+		Maximum number of levels to descend into nested data structures.
+		There is no limit by default.
+
+	* DisableMethods
+		Disables invocation of error and Stringer interface methods.
+		Method invocation is enabled by default.
+
+	* DisablePointerMethods
+		Disables invocation of error and Stringer interface methods on types
+		which only accept pointer receivers from non-pointer variables.
+		Pointer method invocation is enabled by default.
+
+	* DisablePointerAddresses
+		DisablePointerAddresses specifies whether to disable the printing of
+		pointer addresses. This is useful when diffing data structures in tests.
+
+	* DisableCapacities
+		DisableCapacities specifies whether to disable the printing of
+		capacities for arrays, slices, maps and channels. This is useful when
+		diffing data structures in tests.
+
+	* ContinueOnMethod
+		Enables recursion into types after invoking error and Stringer interface
+		methods. Recursion after method invocation is disabled by default.
+
+	* SortKeys
+		Specifies map keys should be sorted before being printed. Use
+		this to have a more deterministic, diffable output.  Note that
+		only native types (bool, int, uint, floats, uintptr and string)
+		and types which implement error or Stringer interfaces are
+		supported with other types sorted according to the
+		reflect.Value.String() output which guarantees display
+		stability.  Natural map order is used by default.
+
+	* SpewKeys
+		Specifies that, as a last resort attempt, map keys should be
+		spewed to strings and sorted by those strings.  This is only
+		considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+	spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer.  For example, to dump to standard error:
+
+	spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+	str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+	(main.Foo) {
+	 unexportedField: (*main.Bar)(0xf84002e210)({
+	  flag: (main.Flag) flagTwo,
+	  data: (uintptr) <nil>
+	 }),
+	 ExportedField: (map[interface {}]interface {}) (len=1) {
+	  (string) (len=3) "one": (bool) true
+	 }
+	}
+
+Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
+command as shown.
+	([]uint8) (len=32 cap=32) {
+	 00000000  11 12 13 14 15 16 17 18  19 1a 1b 1c 1d 1e 1f 20  |............... |
+	 00000010  21 22 23 24 25 26 27 28  29 2a 2b 2c 2d 2e 2f 30  |!"#$%&'()*+,-./0|
+	 00000020  31 32                                             |12|
+	}
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Print.  The
+functions have syntax you are most likely already familiar with:
+
+	spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+	spew.Println(myVar, myVar2)
+	spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+	spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
+
+Sample Formatter Output
+
+Double pointer to a uint8:
+	  %v: <**>5
+	 %+v: <**>(0xf8400420d0->0xf8400420c8)5
+	 %#v: (**uint8)5
+	%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
+
+Pointer to circular struct with a uint8 field and a pointer to itself:
+	  %v: <*>{1 <*><shown>}
+	 %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
+	 %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
+	%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
+
+See the Printf example for details on the setup of variables being shown
+here.
+
+Errors
+
+Since it is possible for custom Stringer/error interfaces to panic, spew
+detects them and handles them internally by printing the panic information
+inline with the output.  Since spew is intended to provide deep pretty printing
+capabilities on structures, it intentionally does not return any errors.
+*/
+package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
new file mode 100644
index 0000000..f78d89f
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+var (
+	// uint8Type is a reflect.Type representing a uint8.  It is used to
+	// convert cgo types to uint8 slices for hexdumping.
+	uint8Type = reflect.TypeOf(uint8(0))
+
+	// cCharRE is a regular expression that matches a cgo char.
+	// It is used to detect character arrays to hexdump them.
+	cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
+
+	// cUnsignedCharRE is a regular expression that matches a cgo unsigned
+	// char.  It is used to detect unsigned character arrays to hexdump
+	// them.
+	cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
+
+	// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
+	// It is used to detect uint8_t arrays to hexdump them.
+	cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
+)
+
+// dumpState contains information about the state of a dump operation.
+type dumpState struct {
+	w                io.Writer       // sink that all dump output is written to
+	depth            int             // current nesting level; drives indentation
+	ignoreNextType   bool            // one-shot flag: skip the type annotation on the next dump call
+	ignoreNextIndent bool            // one-shot flag: skip indentation on the next indent call
+	cs               *ConfigState    // configuration options in effect for this dump
+}
+
+// indent performs indentation according to the depth level and cs.Indent
+// option.
+func (d *dumpState) indent() {
+	if d.ignoreNextIndent {
+		// One-shot suppression requested by the caller; clear it so the
+		// following call indents normally again.
+		d.ignoreNextIndent = false
+		return
+	}
+	d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() == reflect.Interface && !v.IsNil() {
+		v = v.Elem()
+	}
+	return v
+}
+
+// dumpPtr handles formatting of pointers by indirecting them as necessary.
+func (d *dumpState) dumpPtr(v reflect.Value) {
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range d.pointers {
+		if depth >= d.depth {
+			delete(d.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by dereferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := d.pointers[addr]; ok && pd < d.depth {
+			cycleFound = true
+			// Don't count the pointer that closed the cycle as an
+			// indirection; it is reported as circular instead.
+			indirects--
+			break
+		}
+		d.pointers[addr] = d.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type information.
+	d.w.Write(openParenBytes)
+	d.w.Write(bytes.Repeat(asteriskBytes, indirects))
+	d.w.Write([]byte(ve.Type().String()))
+	d.w.Write(closeParenBytes)
+
+	// Display pointer information.
+	if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
+		d.w.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				d.w.Write(pointerChainBytes)
+			}
+			printHexPtr(d.w, addr)
+		}
+		d.w.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	d.w.Write(openParenBytes)
+	switch {
+	case nilFound:
+		d.w.Write(nilAngleBytes)
+
+	case cycleFound:
+		d.w.Write(circularBytes)
+
+	default:
+		d.ignoreNextType = true
+		d.dump(ve)
+	}
+	d.w.Write(closeParenBytes)
+}
+
+// dumpSlice handles formatting of arrays and slices.  Byte (uint8 under
+// reflection) arrays and slices are dumped in hexdump -C fashion.
+func (d *dumpState) dumpSlice(v reflect.Value) {
+	// Determine whether this type should be hex dumped or not.  Also,
+	// for types which should be hexdumped, try to use the underlying data
+	// first, then fall back to trying to convert them to a uint8 slice.
+	var buf []uint8
+	doConvert := false
+	doHexDump := false
+	numEntries := v.Len()
+	if numEntries > 0 {
+		vt := v.Index(0).Type()
+		vts := vt.String()
+		switch {
+		// C types that need to be converted.
+		case cCharRE.MatchString(vts):
+			fallthrough
+		case cUnsignedCharRE.MatchString(vts):
+			fallthrough
+		case cUint8tCharRE.MatchString(vts):
+			doConvert = true
+
+		// Try to use existing uint8 slices and fall back to converting
+		// and copying if that fails.
+		case vt.Kind() == reflect.Uint8:
+			// We need an addressable interface to convert the type
+			// to a byte slice.  However, the reflect package won't
+			// give us an interface on certain things like
+			// unexported struct fields in order to enforce
+			// visibility rules.  We use unsafe, when available, to
+			// bypass these restrictions since this package does not
+			// mutate the values.
+			vs := v
+			if !vs.CanInterface() || !vs.CanAddr() {
+				vs = unsafeReflectValue(vs)
+			}
+			if !UnsafeDisabled {
+				vs = vs.Slice(0, numEntries)
+
+				// Use the existing uint8 slice if it can be
+				// type asserted.
+				iface := vs.Interface()
+				if slice, ok := iface.([]uint8); ok {
+					buf = slice
+					doHexDump = true
+					break
+				}
+			}
+
+			// The underlying data needs to be converted if it can't
+			// be type asserted to a uint8 slice.
+			doConvert = true
+		}
+
+		// Copy and convert the underlying type if needed.
+		if doConvert && vt.ConvertibleTo(uint8Type) {
+			// Convert and copy each element into a uint8 byte
+			// slice.
+			buf = make([]uint8, numEntries)
+			for i := 0; i < numEntries; i++ {
+				vv := v.Index(i)
+				buf[i] = uint8(vv.Convert(uint8Type).Uint())
+			}
+			doHexDump = true
+		}
+	}
+
+	// Hexdump the entire slice as needed.
+	if doHexDump {
+		indent := strings.Repeat(d.cs.Indent, d.depth)
+		str := indent + hex.Dump(buf)
+		str = strings.Replace(str, "\n", "\n"+indent, -1)
+		str = strings.TrimRight(str, d.cs.Indent)
+		d.w.Write([]byte(str))
+		return
+	}
+
+	// Recursively call dump for each item.
+	for i := 0; i < numEntries; i++ {
+		d.dump(d.unpackValue(v.Index(i)))
+		if i < (numEntries - 1) {
+			d.w.Write(commaNewlineBytes)
+		} else {
+			d.w.Write(newlineBytes)
+		}
+	}
+}
+
+// dump is the main workhorse for dumping a value.  It uses the passed reflect
+// value to figure out what kind of object we are dealing with and formats it
+// appropriately.  It is a recursive function, however circular data structures
+// are detected and handled properly.
+func (d *dumpState) dump(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		d.w.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		d.indent()
+		d.dumpPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !d.ignoreNextType {
+		d.indent()
+		d.w.Write(openParenBytes)
+		d.w.Write([]byte(v.Type().String()))
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+	d.ignoreNextType = false
+
+	// Display length and capacity if the built-in len and cap functions
+	// work with the value's kind and the len/cap itself is non-zero.
+	valueLen, valueCap := 0, 0
+	switch v.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Chan:
+		valueLen, valueCap = v.Len(), v.Cap()
+	case reflect.Map, reflect.String:
+		valueLen = v.Len()
+	}
+	if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
+		d.w.Write(openParenBytes)
+		if valueLen != 0 {
+			d.w.Write(lenEqualsBytes)
+			printInt(d.w, int64(valueLen), 10)
+		}
+		if !d.cs.DisableCapacities && valueCap != 0 {
+			if valueLen != 0 {
+				d.w.Write(spaceBytes)
+			}
+			d.w.Write(capEqualsBytes)
+			printInt(d.w, int64(valueCap), 10)
+		}
+		d.w.Write(closeParenBytes)
+		d.w.Write(spaceBytes)
+	}
+
+	// Call Stringer/error interfaces if they exist and the handle methods flag
+	// is enabled
+	if !d.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(d.cs, d.w, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(d.w, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(d.w, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(d.w, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(d.w, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(d.w, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(d.w, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(d.w, v.Complex(), 64)
+
+	case reflect.Slice:
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			d.dumpSlice(v)
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.String:
+		d.w.Write([]byte(strconv.Quote(v.String())))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			d.w.Write(nilAngleBytes)
+			break
+		}
+
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			numEntries := v.Len()
+			keys := v.MapKeys()
+			if d.cs.SortKeys {
+				sortValues(keys, d.cs)
+			}
+			for i, key := range keys {
+				d.dump(d.unpackValue(key))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.MapIndex(key)))
+				if i < (numEntries - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Struct:
+		d.w.Write(openBraceNewlineBytes)
+		d.depth++
+		if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
+			d.indent()
+			d.w.Write(maxNewlineBytes)
+		} else {
+			vt := v.Type()
+			numFields := v.NumField()
+			for i := 0; i < numFields; i++ {
+				d.indent()
+				vtf := vt.Field(i)
+				d.w.Write([]byte(vtf.Name))
+				d.w.Write(colonSpaceBytes)
+				d.ignoreNextIndent = true
+				d.dump(d.unpackValue(v.Field(i)))
+				if i < (numFields - 1) {
+					d.w.Write(commaNewlineBytes)
+				} else {
+					d.w.Write(newlineBytes)
+				}
+			}
+		}
+		d.depth--
+		d.indent()
+		d.w.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(d.w, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(d.w, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it in case any new
+	// types are added.
+	default:
+		if v.CanInterface() {
+			fmt.Fprintf(d.w, "%v", v.Interface())
+		} else {
+			fmt.Fprintf(d.w, "%v", v.String())
+		}
+	}
+}
+
+// fdump consolidates the dumping logic shared by the public Dump, Fdump and
+// Sdump entry points, which differ only in their writer and config state.
+func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
+	for _, arg := range a {
+		// A nil argument carries no type to reflect on; print it directly.
+		if arg == nil {
+			w.Write(interfaceBytes)
+			w.Write(spaceBytes)
+			w.Write(nilAngleBytes)
+			w.Write(newlineBytes)
+			continue
+		}
+
+		// Fresh dump state per argument so pointer tracking never leaks
+		// from one argument to the next.
+		ds := dumpState{
+			w:        w,
+			cs:       cs,
+			pointers: make(map[uintptr]int),
+		}
+		ds.dump(reflect.ValueOf(arg))
+		w.Write(newlineBytes)
+	}
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w.  It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+	// Identical to Dump except the destination writer is caller-supplied.
+	fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+	var buf bytes.Buffer
+	fdump(&Config, &buf, a...)
+	return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value.  It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+	* Pointers are dereferenced and followed
+	* Circular data structures are detected and handled properly
+	* Custom Stringer/error interfaces are optionally invoked, including
+	  on unexported types
+	* Custom types which only implement the Stringer/error interfaces via
+	  a pointer receiver are optionally invoked when passing non-pointer
+	  variables
+	* Byte arrays and slices are dumped like the hexdump -C command which
+	  includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config.  See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+	// Dump always writes to standard output using the global spew.Config.
+	fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// supportedFlags is a list of all the character flags supported by fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation.  The NewFormatter function can
+// be used to get a new Formatter which can be used directly as arguments
+// in standard fmt package printing calls.
+type formatState struct {
+	value          interface{}     // the original value handed to NewFormatter
+	fs             fmt.State       // fmt state of the in-progress call; set in Format
+	depth          int             // current nesting depth inside composite values
+	pointers       map[uintptr]int // visited pointer addrs -> depth first seen (cycle detection)
+	ignoreNextType bool            // suppress the next type display (set after indirecting)
+	cs             *ConfigState    // configuration options in effect for this formatter
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type.  Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	buf.WriteRune('v')
+
+	format = buf.String()
+	return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package.  This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+	buf := bytes.NewBuffer(percentBytes)
+
+	for _, flag := range supportedFlags {
+		if f.fs.Flag(int(flag)) {
+			buf.WriteRune(flag)
+		}
+	}
+
+	if width, ok := f.fs.Width(); ok {
+		buf.WriteString(strconv.Itoa(width))
+	}
+
+	if precision, ok := f.fs.Precision(); ok {
+		buf.Write(precisionBytes)
+		buf.WriteString(strconv.Itoa(precision))
+	}
+
+	buf.WriteRune(verb)
+
+	format = buf.String()
+	return format
+}
+
+// unpackValue returns the value inside a non-nil interface when possible and
+// ensures the unpacked value's type is displayed when the show-types flag is
+// set.  Useful for structs, arrays, slices and maps whose elements are packed
+// inside interfaces.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+	if v.Kind() != reflect.Interface {
+		return v
+	}
+	// Unpacking reveals the concrete type, so it must be shown again.
+	f.ignoreNextType = false
+	if v.IsNil() {
+		return v
+	}
+	return v.Elem()
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary,
+// then recursing into format for the final dereferenced value.  Nil pointers
+// and circular references short-circuit with a marker instead.
+func (f *formatState) formatPtr(v reflect.Value) {
+	// Display nil if top level pointer is nil.
+	showTypes := f.fs.Flag('#')
+	if v.IsNil() && (!showTypes || f.ignoreNextType) {
+		f.fs.Write(nilAngleBytes)
+		return
+	}
+
+	// Remove pointers at or below the current depth from map used to detect
+	// circular refs.
+	for k, depth := range f.pointers {
+		if depth >= f.depth {
+			delete(f.pointers, k)
+		}
+	}
+
+	// Keep list of all dereferenced pointers to possibly show later.
+	pointerChain := make([]uintptr, 0)
+
+	// Figure out how many levels of indirection there are by derferencing
+	// pointers and unpacking interfaces down the chain while detecting circular
+	// references.
+	nilFound := false
+	cycleFound := false
+	indirects := 0
+	ve := v
+	for ve.Kind() == reflect.Ptr {
+		if ve.IsNil() {
+			nilFound = true
+			break
+		}
+		indirects++
+		addr := ve.Pointer()
+		pointerChain = append(pointerChain, addr)
+		if pd, ok := f.pointers[addr]; ok && pd < f.depth {
+			cycleFound = true
+			// Don't count the pointer at which the cycle was detected.
+			indirects--
+			break
+		}
+		f.pointers[addr] = f.depth
+
+		ve = ve.Elem()
+		if ve.Kind() == reflect.Interface {
+			if ve.IsNil() {
+				nilFound = true
+				break
+			}
+			ve = ve.Elem()
+		}
+	}
+
+	// Display type or indirection level depending on flags.
+	if showTypes && !f.ignoreNextType {
+		f.fs.Write(openParenBytes)
+		f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
+		f.fs.Write([]byte(ve.Type().String()))
+		f.fs.Write(closeParenBytes)
+	} else {
+		if nilFound || cycleFound {
+			indirects += strings.Count(ve.Type().String(), "*")
+		}
+		f.fs.Write(openAngleBytes)
+		f.fs.Write([]byte(strings.Repeat("*", indirects)))
+		f.fs.Write(closeAngleBytes)
+	}
+
+	// Display pointer information depending on flags.
+	if f.fs.Flag('+') && (len(pointerChain) > 0) {
+		f.fs.Write(openParenBytes)
+		for i, addr := range pointerChain {
+			if i > 0 {
+				f.fs.Write(pointerChainBytes)
+			}
+			printHexPtr(f.fs, addr)
+		}
+		f.fs.Write(closeParenBytes)
+	}
+
+	// Display dereferenced value.
+	switch {
+	case nilFound:
+		f.fs.Write(nilAngleBytes)
+
+	case cycleFound:
+		f.fs.Write(circularShortBytes)
+
+	default:
+		// The type (if any) was already printed above; don't repeat it.
+		f.ignoreNextType = true
+		f.format(ve)
+	}
+}
+
+// format is the main workhorse for providing the Formatter interface.  It
+// uses the passed reflect value to figure out what kind of object we are
+// dealing with and formats it appropriately.  It is a recursive function,
+// however circular data structures are detected and handled properly.
+func (f *formatState) format(v reflect.Value) {
+	// Handle invalid reflect values immediately.
+	kind := v.Kind()
+	if kind == reflect.Invalid {
+		f.fs.Write(invalidAngleBytes)
+		return
+	}
+
+	// Handle pointers specially.
+	if kind == reflect.Ptr {
+		f.formatPtr(v)
+		return
+	}
+
+	// Print type information unless already handled elsewhere.
+	if !f.ignoreNextType && f.fs.Flag('#') {
+		f.fs.Write(openParenBytes)
+		f.fs.Write([]byte(v.Type().String()))
+		f.fs.Write(closeParenBytes)
+	}
+	// The suppression only applies to a single value; reset it now.
+	f.ignoreNextType = false
+
+	// Call Stringer/error interfaces if they exist and the handle methods
+	// flag is enabled.
+	if !f.cs.DisableMethods {
+		if (kind != reflect.Invalid) && (kind != reflect.Interface) {
+			if handled := handleMethods(f.cs, f.fs, v); handled {
+				return
+			}
+		}
+	}
+
+	switch kind {
+	case reflect.Invalid:
+		// Do nothing.  We should never get here since invalid has already
+		// been handled above.
+
+	case reflect.Bool:
+		printBool(f.fs, v.Bool())
+
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+		printInt(f.fs, v.Int(), 10)
+
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+		printUint(f.fs, v.Uint(), 10)
+
+	case reflect.Float32:
+		printFloat(f.fs, v.Float(), 32)
+
+	case reflect.Float64:
+		printFloat(f.fs, v.Float(), 64)
+
+	case reflect.Complex64:
+		printComplex(f.fs, v.Complex(), 32)
+
+	case reflect.Complex128:
+		printComplex(f.fs, v.Complex(), 64)
+
+	case reflect.Slice:
+		// nil slices get the nil marker; non-nil fall through to Array.
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+		fallthrough
+
+	case reflect.Array:
+		f.fs.Write(openBracketBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			numEntries := v.Len()
+			for i := 0; i < numEntries; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.Index(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBracketBytes)
+
+	case reflect.String:
+		// Note: written without quotes here, unlike dump which quotes strings.
+		f.fs.Write([]byte(v.String()))
+
+	case reflect.Interface:
+		// The only time we should get here is for nil interfaces due to
+		// unpackValue calls.
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+		}
+
+	case reflect.Ptr:
+		// Do nothing.  We should never get here since pointers have already
+		// been handled above.
+
+	case reflect.Map:
+		// nil maps should be indicated as different than empty maps
+		if v.IsNil() {
+			f.fs.Write(nilAngleBytes)
+			break
+		}
+
+		f.fs.Write(openMapBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			keys := v.MapKeys()
+			if f.cs.SortKeys {
+				sortValues(keys, f.cs)
+			}
+			for i, key := range keys {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				f.ignoreNextType = true
+				f.format(f.unpackValue(key))
+				f.fs.Write(colonBytes)
+				f.ignoreNextType = true
+				f.format(f.unpackValue(v.MapIndex(key)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeMapBytes)
+
+	case reflect.Struct:
+		numFields := v.NumField()
+		f.fs.Write(openBraceBytes)
+		f.depth++
+		if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
+			f.fs.Write(maxShortBytes)
+		} else {
+			vt := v.Type()
+			for i := 0; i < numFields; i++ {
+				if i > 0 {
+					f.fs.Write(spaceBytes)
+				}
+				vtf := vt.Field(i)
+				// Field names only show for %+v and %#v.
+				if f.fs.Flag('+') || f.fs.Flag('#') {
+					f.fs.Write([]byte(vtf.Name))
+					f.fs.Write(colonBytes)
+				}
+				f.format(f.unpackValue(v.Field(i)))
+			}
+		}
+		f.depth--
+		f.fs.Write(closeBraceBytes)
+
+	case reflect.Uintptr:
+		printHexPtr(f.fs, uintptr(v.Uint()))
+
+	case reflect.UnsafePointer, reflect.Chan, reflect.Func:
+		printHexPtr(f.fs, v.Pointer())
+
+	// There were not any other types at the time this code was written, but
+	// fall back to letting the default fmt package handle it if any get added.
+	default:
+		format := f.buildDefaultFormat()
+		if v.CanInterface() {
+			fmt.Fprintf(f.fs, format, v.Interface())
+		} else {
+			fmt.Fprintf(f.fs, format, v.String())
+		}
+	}
+}
+
+// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
+// details.
+func (f *formatState) Format(fs fmt.State, verb rune) {
+	// Capture the fmt state so the recursive helpers can write to it.
+	f.fs = fs
+
+	// Use standard formatting for verbs that are not v.
+	if verb != 'v' {
+		format := f.constructOrigFormat(verb)
+		fmt.Fprintf(fs, format, f.value)
+		return
+	}
+
+	// nil is special-cased since there is no reflect value to walk.
+	if f.value == nil {
+		if fs.Flag('#') {
+			fs.Write(interfaceBytes)
+		}
+		fs.Write(nilAngleBytes)
+		return
+	}
+
+	f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter consolidates the construction logic shared by the public
+// functions that accept varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+	return &formatState{
+		value:    v,
+		cs:       cs,
+		pointers: make(map[uintptr]int),
+	}
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface.  As a result, it integrates cleanly with standard fmt package
+printing functions.  The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations.  Any other verbs such as %x and %q will be sent to the the
+standard fmt package for formatting.  In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly.  It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+	// Uses the global spew.Config; see ConfigState for per-instance options.
+	return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"fmt"
+	"io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the formatted string as a value that satisfies error.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+	// Box each argument in a spew Formatter, then defer to fmt.
+	wrapped := convertArgs(a)
+	return fmt.Errorf(format, wrapped...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+	// Box each argument in a spew Formatter, then defer to fmt.
+	wrapped := convertArgs(a)
+	return fmt.Fprint(w, wrapped...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+	// Box each argument in a spew Formatter, then defer to fmt.
+	wrapped := convertArgs(a)
+	return fmt.Fprintf(w, format, wrapped...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+	// Box each argument in a spew Formatter, then defer to fmt.
+	wrapped := convertArgs(a)
+	return fmt.Fprintln(w, wrapped...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+	// Box each argument in a spew Formatter, then defer to fmt.
+	wrapped := convertArgs(a)
+	return fmt.Print(wrapped...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+	// Box each argument in a spew Formatter, then defer to fmt.
+	wrapped := convertArgs(a)
+	return fmt.Printf(format, wrapped...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the number of bytes written and any write error encountered.  See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+	// Box each argument in a spew Formatter, then defer to fmt.
+	wrapped := convertArgs(a)
+	return fmt.Println(wrapped...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+	// Box each argument in a spew Formatter, then defer to fmt.
+	wrapped := convertArgs(a)
+	return fmt.Sprint(wrapped...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintf(format string, a ...interface{}) string {
+	// Box each argument in a spew Formatter, then defer to fmt.
+	wrapped := convertArgs(a)
+	return fmt.Sprintf(format, wrapped...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter.  It
+// returns the resulting string.  See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprintln(a ...interface{}) string {
+	// Box each argument in a spew Formatter, then defer to fmt.
+	wrapped := convertArgs(a)
+	return fmt.Sprintln(wrapped...)
+}
+
+// convertArgs accepts a slice of arguments and returns a slice of the same
+// length with each argument wrapped in a default spew Formatter.
+func convertArgs(args []interface{}) (formatters []interface{}) {
+	formatters = make([]interface{}, 0, len(args))
+	for _, arg := range args {
+		formatters = append(formatters, NewFormatter(arg))
+	}
+	return formatters
+}
diff --git a/vendor/github.com/fiatjaf/eventstore/.gitignore b/vendor/github.com/fiatjaf/eventstore/.gitignore
index e69de29..182da1e 100644
--- a/vendor/github.com/fiatjaf/eventstore/.gitignore
+++ b/vendor/github.com/fiatjaf/eventstore/.gitignore
@@ -0,0 +1 @@
+knowledge.md
diff --git a/vendor/github.com/fiatjaf/eventstore/postgresql/postgresql.go b/vendor/github.com/fiatjaf/eventstore/postgresql/postgresql.go
index 1e79450..778a0ed 100644
--- a/vendor/github.com/fiatjaf/eventstore/postgresql/postgresql.go
+++ b/vendor/github.com/fiatjaf/eventstore/postgresql/postgresql.go
@@ -1,10 +1,13 @@
 package postgresql
 
 import (
+	"sync"
+
 	"github.com/jmoiron/sqlx"
 )
 
 type PostgresBackend struct {
+	sync.Mutex
 	*sqlx.DB
 	DatabaseURL       string
 	QueryLimit        int
diff --git a/vendor/github.com/fiatjaf/eventstore/postgresql/replace.go b/vendor/github.com/fiatjaf/eventstore/postgresql/replace.go
new file mode 100644
index 0000000..c4997f6
--- /dev/null
+++ b/vendor/github.com/fiatjaf/eventstore/postgresql/replace.go
@@ -0,0 +1,44 @@
+package postgresql
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/fiatjaf/eventstore"
+	"github.com/fiatjaf/eventstore/internal"
+	"github.com/nbd-wtf/go-nostr"
+)
+
+// ReplaceEvent atomically replaces a replaceable or addressable event:
+// it queries the currently stored event for the same kind/author (and "d"
+// tag, when addressable), deletes any older version, and saves the new one
+// only when nothing newer is already stored.
+func (b *PostgresBackend) ReplaceEvent(ctx context.Context, evt *nostr.Event) error {
+	// Serialize replacements so concurrent replaces cannot interleave their
+	// query/delete/save steps.
+	b.Lock()
+	defer b.Unlock()
+
+	filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
+	if nostr.IsAddressableKind(evt.Kind) {
+		// Addressable events are further scoped by their "d" tag.
+		filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
+	}
+
+	ch, err := b.QueryEvents(ctx, filter)
+	if err != nil {
+		return fmt.Errorf("failed to query before replacing: %w", err)
+	}
+
+	shouldStore := true
+	for previous := range ch {
+		if internal.IsOlder(previous, evt) {
+			// Stored event is older: remove it so the new one takes its place.
+			if err := b.DeleteEvent(ctx, previous); err != nil {
+				return fmt.Errorf("failed to delete event for replacing: %w", err)
+			}
+		} else {
+			// A stored event is at least as new; keep it, drop the incoming one.
+			shouldStore = false
+		}
+	}
+
+	if shouldStore {
+		// A duplicate save is not an error for a replacement.
+		if err := b.SaveEvent(ctx, evt); err != nil && err != eventstore.ErrDupEvent {
+			return fmt.Errorf("failed to save: %w", err)
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/fiatjaf/eventstore/relay_interface.go b/vendor/github.com/fiatjaf/eventstore/relay_interface.go
index f754de2..77259e4 100644
--- a/vendor/github.com/fiatjaf/eventstore/relay_interface.go
+++ b/vendor/github.com/fiatjaf/eventstore/relay_interface.go
@@ -4,7 +4,6 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/fiatjaf/eventstore/internal"
 	"github.com/nbd-wtf/go-nostr"
 )
 
@@ -31,41 +30,8 @@ func (w RelayWrapper) Publish(ctx context.Context, evt nostr.Event) error {
 		return nil
 	}
 
-	// from now on we know they are replaceable or addressable
-	if replacer, ok := w.Store.(Replacer); ok {
-		// use the replacer interface to potentially reduce queries and race conditions
-		replacer.Replace(ctx, &evt)
-	} else {
-		// otherwise do it the manual way
-		filter := nostr.Filter{Limit: 1, Kinds: []int{evt.Kind}, Authors: []string{evt.PubKey}}
-		if nostr.IsAddressableKind(evt.Kind) {
-			// when addressable, add the "d" tag to the filter
-			filter.Tags = nostr.TagMap{"d": []string{evt.Tags.GetD()}}
-		}
-
-		// now we fetch the past events, whatever they are, delete them and then save the new
-		ch, err := w.Store.QueryEvents(ctx, filter)
-		if err != nil {
-			return fmt.Errorf("failed to query before replacing: %w", err)
-		}
-
-		shouldStore := true
-		for previous := range ch {
-			if internal.IsOlder(previous, &evt) {
-				if err := w.Store.DeleteEvent(ctx, previous); err != nil {
-					return fmt.Errorf("failed to delete event for replacing: %w", err)
-				}
-			} else {
-				// there is a newer event already stored, so we won't store this
-				shouldStore = false
-			}
-		}
-		if shouldStore {
-			if err := w.SaveEvent(ctx, &evt); err != nil && err != ErrDupEvent {
-				return fmt.Errorf("failed to save: %w", err)
-			}
-		}
-	}
+	// others are replaced
+	w.Store.ReplaceEvent(ctx, &evt)
 
 	return nil
 }
diff --git a/vendor/github.com/fiatjaf/eventstore/store.go b/vendor/github.com/fiatjaf/eventstore/store.go
index 8856708..58d16e6 100644
--- a/vendor/github.com/fiatjaf/eventstore/store.go
+++ b/vendor/github.com/fiatjaf/eventstore/store.go
@@ -22,10 +22,9 @@ type Store interface {
 	DeleteEvent(context.Context, *nostr.Event) error
 	// SaveEvent just saves an event, no side-effects.
 	SaveEvent(context.Context, *nostr.Event) error
-}
-
-type Replacer interface {
-	Replace(context.Context, *nostr.Event) error
+	// ReplaceEvent atomically replaces a replaceable or addressable event.
+	// Conceptually it is like a Query->Delete->Save, but streamlined.
+	ReplaceEvent(context.Context, *nostr.Event) error
 }
 
 type Counter interface {
diff --git a/vendor/github.com/fiatjaf/khatru/.gitignore b/vendor/github.com/fiatjaf/khatru/.gitignore
index f85f391..34e3433 100644
--- a/vendor/github.com/fiatjaf/khatru/.gitignore
+++ b/vendor/github.com/fiatjaf/khatru/.gitignore
@@ -1,2 +1,2 @@
 *.env
-rss-bridge
\ No newline at end of file
+knowledge.md
diff --git a/vendor/github.com/fiatjaf/khatru/README.md b/vendor/github.com/fiatjaf/khatru/README.md
index 841bf46..8549e5b 100644
--- a/vendor/github.com/fiatjaf/khatru/README.md
+++ b/vendor/github.com/fiatjaf/khatru/README.md
@@ -127,6 +127,7 @@ Fear no more. Using the https://github.com/fiatjaf/eventstore module you get a b
 	relay.QueryEvents = append(relay.QueryEvents, db.QueryEvents)
 	relay.CountEvents = append(relay.CountEvents, db.CountEvents)
 	relay.DeleteEvent = append(relay.DeleteEvent, db.DeleteEvent)
+	relay.ReplaceEvent = append(relay.ReplaceEvent, db.ReplaceEvent)
 ```
 
 ### But I don't want to write a bunch of custom policies!
diff --git a/vendor/github.com/fiatjaf/khatru/adding.go b/vendor/github.com/fiatjaf/khatru/adding.go
index 86b3612..caa021a 100644
--- a/vendor/github.com/fiatjaf/khatru/adding.go
+++ b/vendor/github.com/fiatjaf/khatru/adding.go
@@ -106,6 +106,9 @@ func (rl *Relay) AddEvent(ctx context.Context, evt *nostr.Event) (skipBroadcast
 		for _, ons := range rl.OnEventSaved {
 			ons(ctx, evt)
 		}
+
+		// track event expiration if applicable
+		rl.expirationManager.trackEvent(evt)
 	}
 
 	return false, nil
diff --git a/vendor/github.com/fiatjaf/khatru/expiration.go b/vendor/github.com/fiatjaf/khatru/expiration.go
new file mode 100644
index 0000000..9183e52
--- /dev/null
+++ b/vendor/github.com/fiatjaf/khatru/expiration.go
@@ -0,0 +1,135 @@
+package khatru
+
+import (
+	"container/heap"
+	"context"
+	"sync"
+	"time"
+
+	"github.com/nbd-wtf/go-nostr"
+	"github.com/nbd-wtf/go-nostr/nip40"
+)
+
+// expiringEvent pairs an event id with the timestamp at which it expires.
+type expiringEvent struct {
+	id        string
+	expiresAt nostr.Timestamp
+}
+
+// expiringEventHeap is a min-heap of expiring events ordered by soonest
+// expiration, for use with container/heap.
+type expiringEventHeap []expiringEvent
+
+func (h expiringEventHeap) Len() int           { return len(h) }
+func (h expiringEventHeap) Less(i, j int) bool { return h[i].expiresAt < h[j].expiresAt }
+func (h expiringEventHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }
+
+// Push appends x to the heap's backing slice; call only via container/heap.
+func (h *expiringEventHeap) Push(x interface{}) {
+	*h = append(*h, x.(expiringEvent))
+}
+
+// Pop removes and returns the last element; call only via container/heap.
+func (h *expiringEventHeap) Pop() interface{} {
+	old := *h
+	n := len(old)
+	x := old[n-1]
+	*h = old[0 : n-1]
+	return x
+}
+
+// expirationManager tracks events that carry an expiration timestamp and
+// deletes them from the relay's stores once that time passes.
+type expirationManager struct {
+	events          expiringEventHeap // min-heap of tracked events, soonest expiry first
+	mu              sync.Mutex        // guards events
+	relay           *Relay            // relay whose query/delete hooks are used
+	interval        time.Duration     // how often expired events are checked for
+	initialScanDone bool              // set once the one-time scan of stored events ran
+}
+
+// newExpirationManager creates an expiration manager for the given relay
+// that checks for expired events once per hour.
+func newExpirationManager(relay *Relay) *expirationManager {
+	return &expirationManager{
+		events:   make(expiringEventHeap, 0),
+		relay:    relay,
+		interval: time.Hour,
+	}
+}
+
+// start runs the expiration loop until ctx is canceled, checking for expired
+// events every em.interval.  On the first tick it additionally performs a
+// one-time scan of already-stored events.
+//
+// NOTE(review): the initial scan does not run until the first tick fires,
+// i.e. up to em.interval (one hour) after startup — confirm this delay is
+// intended.
+func (em *expirationManager) start(ctx context.Context) {
+	ticker := time.NewTicker(em.interval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+			if !em.initialScanDone {
+				em.initialScan(ctx)
+				em.initialScanDone = true
+			}
+
+			em.checkExpiredEvents(ctx)
+		}
+	}
+}
+
+// initialScan walks every event already present in the relay's query stores
+// and pushes those carrying an expiration onto the heap.
+func (em *expirationManager) initialScan(ctx context.Context) {
+	em.mu.Lock()
+	defer em.mu.Unlock()
+
+	// query all events
+	for _, query := range em.relay.QueryEvents {
+		ch, err := query(ctx, nostr.Filter{})
+		if err != nil {
+			// best-effort: a store that fails to answer is simply skipped
+			continue
+		}
+
+		for evt := range ch {
+			// events without an expiration (-1) are not tracked
+			if expiresAt := nip40.GetExpiration(evt.Tags); expiresAt != -1 {
+				heap.Push(&em.events, expiringEvent{
+					id:        evt.ID,
+					expiresAt: expiresAt,
+				})
+			}
+		}
+	}
+
+	heap.Init(&em.events)
+}
+
+// checkExpiredEvents pops every event whose expiration time has passed and
+// deletes it through the relay's delete hooks.
+func (em *expirationManager) checkExpiredEvents(ctx context.Context) {
+	em.mu.Lock()
+	defer em.mu.Unlock()
+
+	now := nostr.Now()
+
+	// keep deleting events from the heap as long as they're expired
+	for em.events.Len() > 0 {
+		// peek at the soonest-expiring event; the heap root is at index 0
+		next := em.events[0]
+		if now < next.expiresAt {
+			break
+		}
+
+		heap.Pop(&em.events)
+
+		// re-fetch the event by id; only the first store that answers the
+		// query is consulted before invoking every delete hook
+		for _, query := range em.relay.QueryEvents {
+			ch, err := query(ctx, nostr.Filter{IDs: []string{next.id}})
+			if err != nil {
+				continue
+			}
+
+			if evt := <-ch; evt != nil {
+				for _, del := range em.relay.DeleteEvent {
+					del(ctx, evt)
+				}
+			}
+			break
+		}
+	}
+}
+
+// trackEvent adds evt to the expiration heap when it carries an expiration
+// timestamp; events without one are ignored.
+func (em *expirationManager) trackEvent(evt *nostr.Event) {
+	expiresAt := nip40.GetExpiration(evt.Tags)
+	if expiresAt == -1 {
+		// no expiration tag; nothing to track
+		return
+	}
+	em.mu.Lock()
+	defer em.mu.Unlock()
+	heap.Push(&em.events, expiringEvent{id: evt.ID, expiresAt: expiresAt})
+}
diff --git a/vendor/github.com/fiatjaf/khatru/handlers.go b/vendor/github.com/fiatjaf/khatru/handlers.go
index 5dfd3b5..4168ac3 100644
--- a/vendor/github.com/fiatjaf/khatru/handlers.go
+++ b/vendor/github.com/fiatjaf/khatru/handlers.go
@@ -24,10 +24,6 @@ import (
 
 // ServeHTTP implements http.Handler interface.
 func (rl *Relay) ServeHTTP(w http.ResponseWriter, r *http.Request) {
-	if rl.ServiceURL == "" {
-		rl.ServiceURL = getServiceBaseURL(r)
-	}
-
 	corsMiddleware := cors.New(cors.Options{
 		AllowedOrigins: []string{"*"},
 		AllowedMethods: []string{
@@ -319,7 +315,7 @@ func (rl *Relay) HandleWebsocket(w http.ResponseWriter, r *http.Request) {
 					id := string(*env)
 					rl.removeListenerId(ws, id)
 				case *nostr.AuthEnvelope:
-					wsBaseUrl := strings.Replace(rl.ServiceURL, "http", "ws", 1)
+					wsBaseUrl := strings.Replace(rl.getBaseURL(r), "http", "ws", 1)
 					if pubkey, ok := nip42.ValidateAuthEvent(&env.Event, ws.Challenge, wsBaseUrl); ok {
 						ws.AuthedPublicKey = pubkey
 						ws.authLock.Lock()
diff --git a/vendor/github.com/fiatjaf/khatru/helpers.go b/vendor/github.com/fiatjaf/khatru/helpers.go
index 05c0fea..e42583a 100644
--- a/vendor/github.com/fiatjaf/khatru/helpers.go
+++ b/vendor/github.com/fiatjaf/khatru/helpers.go
@@ -3,7 +3,6 @@ package khatru
 import (
 	"net"
 	"net/http"
-	"strconv"
 	"strings"
 
 	"github.com/nbd-wtf/go-nostr"
@@ -14,28 +13,6 @@ func isOlder(previous, next *nostr.Event) bool {
 		(previous.CreatedAt == next.CreatedAt && previous.ID > next.ID)
 }
 
-func getServiceBaseURL(r *http.Request) string {
-	host := r.Header.Get("X-Forwarded-Host")
-	if host == "" {
-		host = r.Host
-	}
-	proto := r.Header.Get("X-Forwarded-Proto")
-	if proto == "" {
-		if host == "localhost" {
-			proto = "http"
-		} else if strings.Index(host, ":") != -1 {
-			// has a port number
-			proto = "http"
-		} else if _, err := strconv.Atoi(strings.ReplaceAll(host, ".", "")); err == nil {
-			// it's a naked IP
-			proto = "http"
-		} else {
-			proto = "https"
-		}
-	}
-	return proto + "://" + host
-}
-
 var privateMasks = func() []net.IPNet {
 	privateCIDRs := []string{
 		"127.0.0.0/8",
diff --git a/vendor/github.com/fiatjaf/khatru/nip86.go b/vendor/github.com/fiatjaf/khatru/nip86.go
index 73b500e..a488468 100644
--- a/vendor/github.com/fiatjaf/khatru/nip86.go
+++ b/vendor/github.com/fiatjaf/khatru/nip86.go
@@ -80,7 +80,7 @@ func (rl *Relay) HandleNIP86(w http.ResponseWriter, r *http.Request) {
 			goto respond
 		}
 
-		if uTag := evt.Tags.GetFirst([]string{"u", ""}); uTag == nil || rl.ServiceURL != (*uTag)[1] {
+		if uTag := evt.Tags.GetFirst([]string{"u", ""}); uTag == nil || rl.getBaseURL(r) != (*uTag)[1] {
 			resp.Error = "invalid 'u' tag"
 			goto respond
 		} else if pht := evt.Tags.GetFirst([]string{"payload", hex.EncodeToString(payloadHash[:])}); pht == nil {
diff --git a/vendor/github.com/fiatjaf/khatru/policies/sane_defaults.go b/vendor/github.com/fiatjaf/khatru/policies/sane_defaults.go
index 249b14f..d76e9d9 100644
--- a/vendor/github.com/fiatjaf/khatru/policies/sane_defaults.go
+++ b/vendor/github.com/fiatjaf/khatru/policies/sane_defaults.go
@@ -9,7 +9,7 @@ import (
 func ApplySaneDefaults(relay *khatru.Relay) {
 	relay.RejectEvent = append(relay.RejectEvent,
 		RejectEventsWithBase64Media,
-		EventIPRateLimiter(2, time.Minute*3, 5),
+		EventIPRateLimiter(2, time.Minute*3, 10),
 	)
 
 	relay.RejectFilter = append(relay.RejectFilter,
@@ -18,6 +18,6 @@ func ApplySaneDefaults(relay *khatru.Relay) {
 	)
 
 	relay.RejectConnection = append(relay.RejectConnection,
-		ConnectionRateLimiter(1, time.Minute*5, 10),
+		ConnectionRateLimiter(1, time.Minute*5, 100),
 	)
 }
diff --git a/vendor/github.com/fiatjaf/khatru/relay.go b/vendor/github.com/fiatjaf/khatru/relay.go
index 5234e98..0c5c4f2 100644
--- a/vendor/github.com/fiatjaf/khatru/relay.go
+++ b/vendor/github.com/fiatjaf/khatru/relay.go
@@ -5,6 +5,8 @@ import (
 	"log"
 	"net/http"
 	"os"
+	"strconv"
+	"strings"
 	"sync"
 	"time"
 
@@ -15,13 +17,15 @@ import (
 )
 
 func NewRelay() *Relay {
+	ctx := context.Background()
+
 	rl := &Relay{
 		Log: log.New(os.Stderr, "[khatru-relay] ", log.LstdFlags),
 
 		Info: &nip11.RelayInformationDocument{
 			Software:      "https://github.com/fiatjaf/khatru",
 			Version:       "n/a",
-			SupportedNIPs: []any{1, 11, 42, 70, 86},
+			SupportedNIPs: []any{1, 11, 40, 42, 70, 86},
 		},
 
 		upgrader: websocket.Upgrader{
@@ -41,10 +45,14 @@ func NewRelay() *Relay {
 		MaxMessageSize: 512000,
 	}
 
+	rl.expirationManager = newExpirationManager(rl)
+	go rl.expirationManager.start(ctx)
+
 	return rl
 }
 
 type Relay struct {
+	// setting this variable overrides the hackish workaround we do to try to figure out our own base URL
 	ServiceURL string
 
 	// hooks that will be called at various times
@@ -105,4 +113,33 @@ type Relay struct {
 	PongWait       time.Duration // Time allowed to read the next pong message from the peer.
 	PingPeriod     time.Duration // Send pings to peer with this period. Must be less than pongWait.
 	MaxMessageSize int64         // Maximum message size allowed from peer.
+
+	// NIP-40 expiration manager
+	expirationManager *expirationManager
+}
+
+func (rl *Relay) getBaseURL(r *http.Request) string {
+	if rl.ServiceURL != "" {
+		return rl.ServiceURL
+	}
+
+	host := r.Header.Get("X-Forwarded-Host")
+	if host == "" {
+		host = r.Host
+	}
+	proto := r.Header.Get("X-Forwarded-Proto")
+	if proto == "" {
+		if host == "localhost" {
+			proto = "http"
+		} else if strings.Index(host, ":") != -1 {
+			// has a port number
+			proto = "http"
+		} else if _, err := strconv.Atoi(strings.ReplaceAll(host, ".", "")); err == nil {
+			// it's a naked IP
+			proto = "http"
+		} else {
+			proto = "https"
+		}
+	}
+	return proto + "://" + host
 }
diff --git a/vendor/github.com/gobwas/httphead/LICENSE b/vendor/github.com/gobwas/httphead/LICENSE
deleted file mode 100644
index 2744317..0000000
--- a/vendor/github.com/gobwas/httphead/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2017 Sergey Kamardin
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/gobwas/httphead/README.md b/vendor/github.com/gobwas/httphead/README.md
deleted file mode 100644
index 67a97fd..0000000
--- a/vendor/github.com/gobwas/httphead/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# httphead.[go](https://golang.org)
-
-[![GoDoc][godoc-image]][godoc-url] 
-
-> Tiny HTTP header value parsing library in go.
-
-## Overview
-
-This library contains low-level functions for scanning HTTP RFC2616 compatible header value grammars.
-
-## Install
-
-```shell
-    go get github.com/gobwas/httphead
-```
-
-## Example
-
-The example below shows how multiple-choise HTTP header value could be parsed with this library:
-
-```go
-	options, ok := httphead.ParseOptions([]byte(`foo;bar=1,baz`), nil)
-	fmt.Println(options, ok)
-	// Output: [{foo map[bar:1]} {baz map[]}] true
-```
-
-The low-level example below shows how to optimize keys skipping and selection
-of some key:
-
-```go
-	// The right part of full header line like:
-	// X-My-Header: key;foo=bar;baz,key;baz
-	header := []byte(`foo;a=0,foo;a=1,foo;a=2,foo;a=3`)
-
-	// We want to search key "foo" with an "a" parameter that equal to "2".
-	var (
-		foo = []byte(`foo`)
-		a   = []byte(`a`)
-		v   = []byte(`2`)
-	)
-	var found bool
-	httphead.ScanOptions(header, func(i int, key, param, value []byte) Control {
-		if !bytes.Equal(key, foo) {
-			return ControlSkip
-		}
-		if !bytes.Equal(param, a) {
-			if bytes.Equal(value, v) {
-				// Found it!
-				found = true
-				return ControlBreak
-			}
-			return ControlSkip
-		}
-		return ControlContinue
-	})
-```
-
-For more usage examples please see [docs][godoc-url] or package tests.
-
-[godoc-image]: https://godoc.org/github.com/gobwas/httphead?status.svg
-[godoc-url]: https://godoc.org/github.com/gobwas/httphead
-[travis-image]: https://travis-ci.org/gobwas/httphead.svg?branch=master
-[travis-url]: https://travis-ci.org/gobwas/httphead
diff --git a/vendor/github.com/gobwas/httphead/cookie.go b/vendor/github.com/gobwas/httphead/cookie.go
deleted file mode 100644
index 05c9a1f..0000000
--- a/vendor/github.com/gobwas/httphead/cookie.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package httphead
-
-import (
-	"bytes"
-)
-
-// ScanCookie scans cookie pairs from data using DefaultCookieScanner.Scan()
-// method.
-func ScanCookie(data []byte, it func(key, value []byte) bool) bool {
-	return DefaultCookieScanner.Scan(data, it)
-}
-
-// DefaultCookieScanner is a CookieScanner which is used by ScanCookie().
-// Note that it is intended to have the same behavior as http.Request.Cookies()
-// has.
-var DefaultCookieScanner = CookieScanner{}
-
-// CookieScanner contains options for scanning cookie pairs.
-// See https://tools.ietf.org/html/rfc6265#section-4.1.1
-type CookieScanner struct {
-	// DisableNameValidation disables name validation of a cookie. If false,
-	// only RFC2616 "tokens" are accepted.
-	DisableNameValidation bool
-
-	// DisableValueValidation disables value validation of a cookie. If false,
-	// only RFC6265 "cookie-octet" characters are accepted.
-	//
-	// Note that Strict option also affects validation of a value.
-	//
-	// If Strict is false, then scanner begins to allow space and comma
-	// characters inside the value for better compatibility with non standard
-	// cookies implementations.
-	DisableValueValidation bool
-
-	// BreakOnPairError sets scanner to immediately return after first pair syntax
-	// validation error.
-	// If false, scanner will try to skip invalid pair bytes and go ahead.
-	BreakOnPairError bool
-
-	// Strict enables strict RFC6265 mode scanning. It affects name and value
-	// validation, as also some other rules.
-	// If false, it is intended to bring the same behavior as
-	// http.Request.Cookies().
-	Strict bool
-}
-
-// Scan maps data to name and value pairs. Usually data represents value of the
-// Cookie header.
-func (c CookieScanner) Scan(data []byte, it func(name, value []byte) bool) bool {
-	lexer := &Scanner{data: data}
-
-	const (
-		statePair = iota
-		stateBefore
-	)
-
-	state := statePair
-
-	for lexer.Buffered() > 0 {
-		switch state {
-		case stateBefore:
-			// Pairs separated by ";" and space, according to the RFC6265:
-			//   cookie-pair *( ";" SP cookie-pair )
-			//
-			// Cookie pairs MUST be separated by (";" SP). So our only option
-			// here is to fail as syntax error.
-			a, b := lexer.Peek2()
-			if a != ';' {
-				return false
-			}
-
-			state = statePair
-
-			advance := 1
-			if b == ' ' {
-				advance++
-			} else if c.Strict {
-				return false
-			}
-
-			lexer.Advance(advance)
-
-		case statePair:
-			if !lexer.FetchUntil(';') {
-				return false
-			}
-
-			var value []byte
-			name := lexer.Bytes()
-			if i := bytes.IndexByte(name, '='); i != -1 {
-				value = name[i+1:]
-				name = name[:i]
-			} else if c.Strict {
-				if !c.BreakOnPairError {
-					goto nextPair
-				}
-				return false
-			}
-
-			if !c.Strict {
-				trimLeft(name)
-			}
-			if !c.DisableNameValidation && !ValidCookieName(name) {
-				if !c.BreakOnPairError {
-					goto nextPair
-				}
-				return false
-			}
-
-			if !c.Strict {
-				value = trimRight(value)
-			}
-			value = stripQuotes(value)
-			if !c.DisableValueValidation && !ValidCookieValue(value, c.Strict) {
-				if !c.BreakOnPairError {
-					goto nextPair
-				}
-				return false
-			}
-
-			if !it(name, value) {
-				return true
-			}
-
-		nextPair:
-			state = stateBefore
-		}
-	}
-
-	return true
-}
-
-// ValidCookieValue reports whether given value is a valid RFC6265
-// "cookie-octet" bytes.
-//
-// cookie-octet = %x21 / %x23-2B / %x2D-3A / %x3C-5B / %x5D-7E
-//                ; US-ASCII characters excluding CTLs,
-//                ; whitespace DQUOTE, comma, semicolon,
-//                ; and backslash
-//
-// Note that the false strict parameter disables errors on space 0x20 and comma
-// 0x2c. This could be useful to bring some compatibility with non-compliant
-// clients/servers in the real world.
-// It acts the same as standard library cookie parser if strict is false.
-func ValidCookieValue(value []byte, strict bool) bool {
-	if len(value) == 0 {
-		return true
-	}
-	for _, c := range value {
-		switch c {
-		case '"', ';', '\\':
-			return false
-		case ',', ' ':
-			if strict {
-				return false
-			}
-		default:
-			if c <= 0x20 {
-				return false
-			}
-			if c >= 0x7f {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-// ValidCookieName reports wheter given bytes is a valid RFC2616 "token" bytes.
-func ValidCookieName(name []byte) bool {
-	for _, c := range name {
-		if !OctetTypes[c].IsToken() {
-			return false
-		}
-	}
-	return true
-}
-
-func stripQuotes(bts []byte) []byte {
-	if last := len(bts) - 1; last > 0 && bts[0] == '"' && bts[last] == '"' {
-		return bts[1:last]
-	}
-	return bts
-}
-
-func trimLeft(p []byte) []byte {
-	var i int
-	for i < len(p) && OctetTypes[p[i]].IsSpace() {
-		i++
-	}
-	return p[i:]
-}
-
-func trimRight(p []byte) []byte {
-	j := len(p)
-	for j > 0 && OctetTypes[p[j-1]].IsSpace() {
-		j--
-	}
-	return p[:j]
-}
diff --git a/vendor/github.com/gobwas/httphead/head.go b/vendor/github.com/gobwas/httphead/head.go
deleted file mode 100644
index a50e907..0000000
--- a/vendor/github.com/gobwas/httphead/head.go
+++ /dev/null
@@ -1,275 +0,0 @@
-package httphead
-
-import (
-	"bufio"
-	"bytes"
-)
-
-// Version contains protocol major and minor version.
-type Version struct {
-	Major int
-	Minor int
-}
-
-// RequestLine contains parameters parsed from the first request line.
-type RequestLine struct {
-	Method  []byte
-	URI     []byte
-	Version Version
-}
-
-// ResponseLine contains parameters parsed from the first response line.
-type ResponseLine struct {
-	Version Version
-	Status  int
-	Reason  []byte
-}
-
-// SplitRequestLine splits given slice of bytes into three chunks without
-// parsing.
-func SplitRequestLine(line []byte) (method, uri, version []byte) {
-	return split3(line, ' ')
-}
-
-// ParseRequestLine parses http request line like "GET / HTTP/1.0".
-func ParseRequestLine(line []byte) (r RequestLine, ok bool) {
-	var i int
-	for i = 0; i < len(line); i++ {
-		c := line[i]
-		if !OctetTypes[c].IsToken() {
-			if i > 0 && c == ' ' {
-				break
-			}
-			return
-		}
-	}
-	if i == len(line) {
-		return
-	}
-
-	var proto []byte
-	r.Method = line[:i]
-	r.URI, proto = split2(line[i+1:], ' ')
-	if len(r.URI) == 0 {
-		return
-	}
-	if major, minor, ok := ParseVersion(proto); ok {
-		r.Version.Major = major
-		r.Version.Minor = minor
-		return r, true
-	}
-
-	return r, false
-}
-
-// SplitResponseLine splits given slice of bytes into three chunks without
-// parsing.
-func SplitResponseLine(line []byte) (version, status, reason []byte) {
-	return split3(line, ' ')
-}
-
-// ParseResponseLine parses first response line into ResponseLine struct.
-func ParseResponseLine(line []byte) (r ResponseLine, ok bool) {
-	var (
-		proto  []byte
-		status []byte
-	)
-	proto, status, r.Reason = split3(line, ' ')
-	if major, minor, ok := ParseVersion(proto); ok {
-		r.Version.Major = major
-		r.Version.Minor = minor
-	} else {
-		return r, false
-	}
-	if n, ok := IntFromASCII(status); ok {
-		r.Status = n
-	} else {
-		return r, false
-	}
-	// TODO(gobwas): parse here r.Reason fot TEXT rule:
-	//   TEXT = <any OCTET except CTLs,
-	//           but including LWS>
-	return r, true
-}
-
-var (
-	httpVersion10     = []byte("HTTP/1.0")
-	httpVersion11     = []byte("HTTP/1.1")
-	httpVersionPrefix = []byte("HTTP/")
-)
-
-// ParseVersion parses major and minor version of HTTP protocol.
-// It returns parsed values and true if parse is ok.
-func ParseVersion(bts []byte) (major, minor int, ok bool) {
-	switch {
-	case bytes.Equal(bts, httpVersion11):
-		return 1, 1, true
-	case bytes.Equal(bts, httpVersion10):
-		return 1, 0, true
-	case len(bts) < 8:
-		return
-	case !bytes.Equal(bts[:5], httpVersionPrefix):
-		return
-	}
-
-	bts = bts[5:]
-
-	dot := bytes.IndexByte(bts, '.')
-	if dot == -1 {
-		return
-	}
-	major, ok = IntFromASCII(bts[:dot])
-	if !ok {
-		return
-	}
-	minor, ok = IntFromASCII(bts[dot+1:])
-	if !ok {
-		return
-	}
-
-	return major, minor, true
-}
-
-// ReadLine reads line from br. It reads until '\n' and returns bytes without
-// '\n' or '\r\n' at the end.
-// It returns err if and only if line does not end in '\n'. Note that read
-// bytes returned in any case of error.
-//
-// It is much like the textproto/Reader.ReadLine() except the thing that it
-// returns raw bytes, instead of string. That is, it avoids copying bytes read
-// from br.
-//
-// textproto/Reader.ReadLineBytes() is also makes copy of resulting bytes to be
-// safe with future I/O operations on br.
-//
-// We could control I/O operations on br and do not need to make additional
-// copy for safety.
-func ReadLine(br *bufio.Reader) ([]byte, error) {
-	var line []byte
-	for {
-		bts, err := br.ReadSlice('\n')
-		if err == bufio.ErrBufferFull {
-			// Copy bytes because next read will discard them.
-			line = append(line, bts...)
-			continue
-		}
-		// Avoid copy of single read.
-		if line == nil {
-			line = bts
-		} else {
-			line = append(line, bts...)
-		}
-		if err != nil {
-			return line, err
-		}
-		// Size of line is at least 1.
-		// In other case bufio.ReadSlice() returns error.
-		n := len(line)
-		// Cut '\n' or '\r\n'.
-		if n > 1 && line[n-2] == '\r' {
-			line = line[:n-2]
-		} else {
-			line = line[:n-1]
-		}
-		return line, nil
-	}
-}
-
-// ParseHeaderLine parses HTTP header as key-value pair. It returns parsed
-// values and true if parse is ok.
-func ParseHeaderLine(line []byte) (k, v []byte, ok bool) {
-	colon := bytes.IndexByte(line, ':')
-	if colon == -1 {
-		return
-	}
-	k = trim(line[:colon])
-	for _, c := range k {
-		if !OctetTypes[c].IsToken() {
-			return nil, nil, false
-		}
-	}
-	v = trim(line[colon+1:])
-	return k, v, true
-}
-
-// IntFromASCII converts ascii encoded decimal numeric value from HTTP entities
-// to an integer.
-func IntFromASCII(bts []byte) (ret int, ok bool) {
-	// ASCII numbers all start with the high-order bits 0011.
-	// If you see that, and the next bits are 0-9 (0000 - 1001) you can grab those
-	// bits and interpret them directly as an integer.
-	var n int
-	if n = len(bts); n < 1 {
-		return 0, false
-	}
-	for i := 0; i < n; i++ {
-		if bts[i]&0xf0 != 0x30 {
-			return 0, false
-		}
-		ret += int(bts[i]&0xf) * pow(10, n-i-1)
-	}
-	return ret, true
-}
-
-const (
-	toLower = 'a' - 'A'      // for use with OR.
-	toUpper = ^byte(toLower) // for use with AND.
-)
-
-// CanonicalizeHeaderKey is like standard textproto/CanonicalMIMEHeaderKey,
-// except that it operates with slice of bytes and modifies it inplace without
-// copying.
-func CanonicalizeHeaderKey(k []byte) {
-	upper := true
-	for i, c := range k {
-		if upper && 'a' <= c && c <= 'z' {
-			k[i] &= toUpper
-		} else if !upper && 'A' <= c && c <= 'Z' {
-			k[i] |= toLower
-		}
-		upper = c == '-'
-	}
-}
-
-// pow for integers implementation.
-// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3
-func pow(a, b int) int {
-	p := 1
-	for b > 0 {
-		if b&1 != 0 {
-			p *= a
-		}
-		b >>= 1
-		a *= a
-	}
-	return p
-}
-
-func split3(p []byte, sep byte) (p1, p2, p3 []byte) {
-	a := bytes.IndexByte(p, sep)
-	b := bytes.IndexByte(p[a+1:], sep)
-	if a == -1 || b == -1 {
-		return p, nil, nil
-	}
-	b += a + 1
-	return p[:a], p[a+1 : b], p[b+1:]
-}
-
-func split2(p []byte, sep byte) (p1, p2 []byte) {
-	i := bytes.IndexByte(p, sep)
-	if i == -1 {
-		return p, nil
-	}
-	return p[:i], p[i+1:]
-}
-
-func trim(p []byte) []byte {
-	var i, j int
-	for i = 0; i < len(p) && (p[i] == ' ' || p[i] == '\t'); {
-		i++
-	}
-	for j = len(p); j > i && (p[j-1] == ' ' || p[j-1] == '\t'); {
-		j--
-	}
-	return p[i:j]
-}
diff --git a/vendor/github.com/gobwas/httphead/httphead.go b/vendor/github.com/gobwas/httphead/httphead.go
deleted file mode 100644
index 2387e80..0000000
--- a/vendor/github.com/gobwas/httphead/httphead.go
+++ /dev/null
@@ -1,331 +0,0 @@
-// Package httphead contains utils for parsing HTTP and HTTP-grammar compatible
-// text protocols headers.
-//
-// That is, this package first aim is to bring ability to easily parse
-// constructions, described here https://tools.ietf.org/html/rfc2616#section-2
-package httphead
-
-import (
-	"bytes"
-	"strings"
-)
-
-// ScanTokens parses data in this form:
-//
-// list = 1#token
-//
-// It returns false if data is malformed.
-func ScanTokens(data []byte, it func([]byte) bool) bool {
-	lexer := &Scanner{data: data}
-
-	var ok bool
-	for lexer.Next() {
-		switch lexer.Type() {
-		case ItemToken:
-			ok = true
-			if !it(lexer.Bytes()) {
-				return true
-			}
-		case ItemSeparator:
-			if !isComma(lexer.Bytes()) {
-				return false
-			}
-		default:
-			return false
-		}
-	}
-
-	return ok && !lexer.err
-}
-
-// ParseOptions parses all header options and appends it to given slice of
-// Option. It returns flag of successful (wellformed input) parsing.
-//
-// Note that appended options are all consist of subslices of data. That is,
-// mutation of data will mutate appended options.
-func ParseOptions(data []byte, options []Option) ([]Option, bool) {
-	var i int
-	index := -1
-	return options, ScanOptions(data, func(idx int, name, attr, val []byte) Control {
-		if idx != index {
-			index = idx
-			i = len(options)
-			options = append(options, Option{Name: name})
-		}
-		if attr != nil {
-			options[i].Parameters.Set(attr, val)
-		}
-		return ControlContinue
-	})
-}
-
-// SelectFlag encodes way of options selection.
-type SelectFlag byte
-
-// String represetns flag as string.
-func (f SelectFlag) String() string {
-	var flags [2]string
-	var n int
-	if f&SelectCopy != 0 {
-		flags[n] = "copy"
-		n++
-	}
-	if f&SelectUnique != 0 {
-		flags[n] = "unique"
-		n++
-	}
-	return "[" + strings.Join(flags[:n], "|") + "]"
-}
-
-const (
-	// SelectCopy causes selector to copy selected option before appending it
-	// to resulting slice.
-	// If SelectCopy flag is not passed to selector, then appended options will
-	// contain sub-slices of the initial data.
-	SelectCopy SelectFlag = 1 << iota
-
-	// SelectUnique causes selector to append only not yet existing option to
-	// resulting slice. Unique is checked by comparing option names.
-	SelectUnique
-)
-
-// OptionSelector contains configuration for selecting Options from header value.
-type OptionSelector struct {
-	// Check is a filter function that applied to every Option that possibly
-	// could be selected.
-	// If Check is nil all options will be selected.
-	Check func(Option) bool
-
-	// Flags contains flags for options selection.
-	Flags SelectFlag
-
-	// Alloc used to allocate slice of bytes when selector is configured with
-	// SelectCopy flag. It will be called with number of bytes needed for copy
-	// of single Option.
-	// If Alloc is nil make is used.
-	Alloc func(n int) []byte
-}
-
-// Select parses header data and appends it to given slice of Option.
-// It also returns flag of successful (wellformed input) parsing.
-func (s OptionSelector) Select(data []byte, options []Option) ([]Option, bool) {
-	var current Option
-	var has bool
-	index := -1
-
-	alloc := s.Alloc
-	if alloc == nil {
-		alloc = defaultAlloc
-	}
-	check := s.Check
-	if check == nil {
-		check = defaultCheck
-	}
-
-	ok := ScanOptions(data, func(idx int, name, attr, val []byte) Control {
-		if idx != index {
-			if has && check(current) {
-				if s.Flags&SelectCopy != 0 {
-					current = current.Copy(alloc(current.Size()))
-				}
-				options = append(options, current)
-				has = false
-			}
-			if s.Flags&SelectUnique != 0 {
-				for i := len(options) - 1; i >= 0; i-- {
-					if bytes.Equal(options[i].Name, name) {
-						return ControlSkip
-					}
-				}
-			}
-			index = idx
-			current = Option{Name: name}
-			has = true
-		}
-		if attr != nil {
-			current.Parameters.Set(attr, val)
-		}
-
-		return ControlContinue
-	})
-	if has && check(current) {
-		if s.Flags&SelectCopy != 0 {
-			current = current.Copy(alloc(current.Size()))
-		}
-		options = append(options, current)
-	}
-
-	return options, ok
-}
-
-func defaultAlloc(n int) []byte { return make([]byte, n) }
-func defaultCheck(Option) bool  { return true }
-
-// Control represents operation that scanner should perform.
-type Control byte
-
-const (
-	// ControlContinue causes scanner to continue scan tokens.
-	ControlContinue Control = iota
-	// ControlBreak causes scanner to stop scan tokens.
-	ControlBreak
-	// ControlSkip causes scanner to skip current entity.
-	ControlSkip
-)
-
-// ScanOptions parses data in this form:
-//
-// values = 1#value
-// value = token *( ";" param )
-// param = token [ "=" (token | quoted-string) ]
-//
-// It calls given callback with the index of the option, option itself and its
-// parameter (attribute and its value, both could be nil). Index is useful when
-// header contains multiple choises for the same named option.
-//
-// Given callback should return one of the defined Control* values.
-// ControlSkip means that passed key is not in caller's interest. That is, all
-// parameters of that key will be skipped.
-// ControlBreak means that no more keys and parameters should be parsed. That
-// is, it must break parsing immediately.
-// ControlContinue means that caller want to receive next parameter and its
-// value or the next key.
-//
-// It returns false if data is malformed.
-func ScanOptions(data []byte, it func(index int, option, attribute, value []byte) Control) bool {
-	lexer := &Scanner{data: data}
-
-	var ok bool
-	var state int
-	const (
-		stateKey = iota
-		stateParamBeforeName
-		stateParamName
-		stateParamBeforeValue
-		stateParamValue
-	)
-
-	var (
-		index             int
-		key, param, value []byte
-		mustCall          bool
-	)
-	for lexer.Next() {
-		var (
-			call      bool
-			growIndex int
-		)
-
-		t := lexer.Type()
-		v := lexer.Bytes()
-
-		switch t {
-		case ItemToken:
-			switch state {
-			case stateKey, stateParamBeforeName:
-				key = v
-				state = stateParamBeforeName
-				mustCall = true
-			case stateParamName:
-				param = v
-				state = stateParamBeforeValue
-				mustCall = true
-			case stateParamValue:
-				value = v
-				state = stateParamBeforeName
-				call = true
-			default:
-				return false
-			}
-
-		case ItemString:
-			if state != stateParamValue {
-				return false
-			}
-			value = v
-			state = stateParamBeforeName
-			call = true
-
-		case ItemSeparator:
-			switch {
-			case isComma(v) && state == stateKey:
-				// Nothing to do.
-
-			case isComma(v) && state == stateParamBeforeName:
-				state = stateKey
-				// Make call only if we have not called this key yet.
-				call = mustCall
-				if !call {
-					// If we have already called callback with the key
-					// that just ended.
-					index++
-				} else {
-					// Else grow the index after calling callback.
-					growIndex = 1
-				}
-
-			case isComma(v) && state == stateParamBeforeValue:
-				state = stateKey
-				growIndex = 1
-				call = true
-
-			case isSemicolon(v) && state == stateParamBeforeName:
-				state = stateParamName
-
-			case isSemicolon(v) && state == stateParamBeforeValue:
-				state = stateParamName
-				call = true
-
-			case isEquality(v) && state == stateParamBeforeValue:
-				state = stateParamValue
-
-			default:
-				return false
-			}
-
-		default:
-			return false
-		}
-
-		if call {
-			switch it(index, key, param, value) {
-			case ControlBreak:
-				// User want to stop to parsing parameters.
-				return true
-
-			case ControlSkip:
-				// User want to skip current param.
-				state = stateKey
-				lexer.SkipEscaped(',')
-
-			case ControlContinue:
-				// User is interested in rest of parameters.
-				// Nothing to do.
-
-			default:
-				panic("unexpected control value")
-			}
-			ok = true
-			param = nil
-			value = nil
-			mustCall = false
-			index += growIndex
-		}
-	}
-	if mustCall {
-		ok = true
-		it(index, key, param, value)
-	}
-
-	return ok && !lexer.err
-}
-
-func isComma(b []byte) bool {
-	return len(b) == 1 && b[0] == ','
-}
-func isSemicolon(b []byte) bool {
-	return len(b) == 1 && b[0] == ';'
-}
-func isEquality(b []byte) bool {
-	return len(b) == 1 && b[0] == '='
-}
diff --git a/vendor/github.com/gobwas/httphead/lexer.go b/vendor/github.com/gobwas/httphead/lexer.go
deleted file mode 100644
index 729855e..0000000
--- a/vendor/github.com/gobwas/httphead/lexer.go
+++ /dev/null
@@ -1,360 +0,0 @@
-package httphead
-
-import (
-	"bytes"
-)
-
-// ItemType encodes type of the lexing token.
-type ItemType int
-
-const (
-	// ItemUndef reports that token is undefined.
-	ItemUndef ItemType = iota
-	// ItemToken reports that token is RFC2616 token.
-	ItemToken
-	// ItemSeparator reports that token is RFC2616 separator.
-	ItemSeparator
-	// ItemString reports that token is RFC2616 quouted string.
-	ItemString
-	// ItemComment reports that token is RFC2616 comment.
-	ItemComment
-	// ItemOctet reports that token is octet slice.
-	ItemOctet
-)
-
-// Scanner represents header tokens scanner.
-// See https://tools.ietf.org/html/rfc2616#section-2
-type Scanner struct {
-	data []byte
-	pos  int
-
-	itemType  ItemType
-	itemBytes []byte
-
-	err bool
-}
-
-// NewScanner creates new RFC2616 data scanner.
-func NewScanner(data []byte) *Scanner {
-	return &Scanner{data: data}
-}
-
-// Next scans for next token. It returns true on successful scanning, and false
-// on error or EOF.
-func (l *Scanner) Next() bool {
-	c, ok := l.nextChar()
-	if !ok {
-		return false
-	}
-	switch c {
-	case '"': // quoted-string;
-		return l.fetchQuotedString()
-
-	case '(': // comment;
-		return l.fetchComment()
-
-	case '\\', ')': // unexpected chars;
-		l.err = true
-		return false
-
-	default:
-		return l.fetchToken()
-	}
-}
-
-// FetchUntil fetches ItemOctet from current scanner position to first
-// occurence of the c or to the end of the underlying data.
-func (l *Scanner) FetchUntil(c byte) bool {
-	l.resetItem()
-	if l.pos == len(l.data) {
-		return false
-	}
-	return l.fetchOctet(c)
-}
-
-// Peek reads byte at current position without advancing it. On end of data it
-// returns 0.
-func (l *Scanner) Peek() byte {
-	if l.pos == len(l.data) {
-		return 0
-	}
-	return l.data[l.pos]
-}
-
-// Peek2 reads two first bytes at current position without advancing it.
-// If there not enough data it returs 0.
-func (l *Scanner) Peek2() (a, b byte) {
-	if l.pos == len(l.data) {
-		return 0, 0
-	}
-	if l.pos+1 == len(l.data) {
-		return l.data[l.pos], 0
-	}
-	return l.data[l.pos], l.data[l.pos+1]
-}
-
-// Buffered reporst how many bytes there are left to scan.
-func (l *Scanner) Buffered() int {
-	return len(l.data) - l.pos
-}
-
-// Advance moves current position index at n bytes. It returns true on
-// successful move.
-func (l *Scanner) Advance(n int) bool {
-	l.pos += n
-	if l.pos > len(l.data) {
-		l.pos = len(l.data)
-		return false
-	}
-	return true
-}
-
-// Skip skips all bytes until first occurence of c.
-func (l *Scanner) Skip(c byte) {
-	if l.err {
-		return
-	}
-	// Reset scanner state.
-	l.resetItem()
-
-	if i := bytes.IndexByte(l.data[l.pos:], c); i == -1 {
-		// Reached the end of data.
-		l.pos = len(l.data)
-	} else {
-		l.pos += i + 1
-	}
-}
-
-// SkipEscaped skips all bytes until first occurence of non-escaped c.
-func (l *Scanner) SkipEscaped(c byte) {
-	if l.err {
-		return
-	}
-	// Reset scanner state.
-	l.resetItem()
-
-	if i := ScanUntil(l.data[l.pos:], c); i == -1 {
-		// Reached the end of data.
-		l.pos = len(l.data)
-	} else {
-		l.pos += i + 1
-	}
-}
-
-// Type reports current token type.
-func (l *Scanner) Type() ItemType {
-	return l.itemType
-}
-
-// Bytes returns current token bytes.
-func (l *Scanner) Bytes() []byte {
-	return l.itemBytes
-}
-
-func (l *Scanner) nextChar() (byte, bool) {
-	// Reset scanner state.
-	l.resetItem()
-
-	if l.err {
-		return 0, false
-	}
-	l.pos += SkipSpace(l.data[l.pos:])
-	if l.pos == len(l.data) {
-		return 0, false
-	}
-	return l.data[l.pos], true
-}
-
-func (l *Scanner) resetItem() {
-	l.itemType = ItemUndef
-	l.itemBytes = nil
-}
-
-func (l *Scanner) fetchOctet(c byte) bool {
-	i := l.pos
-	if j := bytes.IndexByte(l.data[l.pos:], c); j == -1 {
-		// Reached the end of data.
-		l.pos = len(l.data)
-	} else {
-		l.pos += j
-	}
-
-	l.itemType = ItemOctet
-	l.itemBytes = l.data[i:l.pos]
-
-	return true
-}
-
-func (l *Scanner) fetchToken() bool {
-	n, t := ScanToken(l.data[l.pos:])
-	if n == -1 {
-		l.err = true
-		return false
-	}
-
-	l.itemType = t
-	l.itemBytes = l.data[l.pos : l.pos+n]
-	l.pos += n
-
-	return true
-}
-
-func (l *Scanner) fetchQuotedString() (ok bool) {
-	l.pos++
-
-	n := ScanUntil(l.data[l.pos:], '"')
-	if n == -1 {
-		l.err = true
-		return false
-	}
-
-	l.itemType = ItemString
-	l.itemBytes = RemoveByte(l.data[l.pos:l.pos+n], '\\')
-	l.pos += n + 1
-
-	return true
-}
-
-func (l *Scanner) fetchComment() (ok bool) {
-	l.pos++
-
-	n := ScanPairGreedy(l.data[l.pos:], '(', ')')
-	if n == -1 {
-		l.err = true
-		return false
-	}
-
-	l.itemType = ItemComment
-	l.itemBytes = RemoveByte(l.data[l.pos:l.pos+n], '\\')
-	l.pos += n + 1
-
-	return true
-}
-
-// ScanUntil scans for first non-escaped character c in given data.
-// It returns index of matched c and -1 if c is not found.
-func ScanUntil(data []byte, c byte) (n int) {
-	for {
-		i := bytes.IndexByte(data[n:], c)
-		if i == -1 {
-			return -1
-		}
-		n += i
-		if n == 0 || data[n-1] != '\\' {
-			break
-		}
-		n++
-	}
-	return
-}
-
-// ScanPairGreedy scans for complete pair of opening and closing chars in greedy manner.
-// Note that first opening byte must not be present in data.
-func ScanPairGreedy(data []byte, open, close byte) (n int) {
-	var m int
-	opened := 1
-	for {
-		i := bytes.IndexByte(data[n:], close)
-		if i == -1 {
-			return -1
-		}
-		n += i
-		// If found index is not escaped then it is the end.
-		if n == 0 || data[n-1] != '\\' {
-			opened--
-		}
-
-		for m < i {
-			j := bytes.IndexByte(data[m:i], open)
-			if j == -1 {
-				break
-			}
-			m += j + 1
-			opened++
-		}
-
-		if opened == 0 {
-			break
-		}
-
-		n++
-		m = n
-	}
-	return
-}
-
-// RemoveByte returns data without c. If c is not present in data it returns
-// the same slice. If not, it copies data without c.
-func RemoveByte(data []byte, c byte) []byte {
-	j := bytes.IndexByte(data, c)
-	if j == -1 {
-		return data
-	}
-
-	n := len(data) - 1
-
-	// If character is present, than allocate slice with n-1 capacity. That is,
-	// resulting bytes could be at most n-1 length.
-	result := make([]byte, n)
-	k := copy(result, data[:j])
-
-	for i := j + 1; i < n; {
-		j = bytes.IndexByte(data[i:], c)
-		if j != -1 {
-			k += copy(result[k:], data[i:i+j])
-			i = i + j + 1
-		} else {
-			k += copy(result[k:], data[i:])
-			break
-		}
-	}
-
-	return result[:k]
-}
-
-// SkipSpace skips spaces and lws-sequences from p.
-// It returns number ob bytes skipped.
-func SkipSpace(p []byte) (n int) {
-	for len(p) > 0 {
-		switch {
-		case len(p) >= 3 &&
-			p[0] == '\r' &&
-			p[1] == '\n' &&
-			OctetTypes[p[2]].IsSpace():
-			p = p[3:]
-			n += 3
-		case OctetTypes[p[0]].IsSpace():
-			p = p[1:]
-			n++
-		default:
-			return
-		}
-	}
-	return
-}
-
-// ScanToken scan for next token in p. It returns length of the token and its
-// type. It do not trim p.
-func ScanToken(p []byte) (n int, t ItemType) {
-	if len(p) == 0 {
-		return 0, ItemUndef
-	}
-
-	c := p[0]
-	switch {
-	case OctetTypes[c].IsSeparator():
-		return 1, ItemSeparator
-
-	case OctetTypes[c].IsToken():
-		for n = 1; n < len(p); n++ {
-			c := p[n]
-			if !OctetTypes[c].IsToken() {
-				break
-			}
-		}
-		return n, ItemToken
-
-	default:
-		return -1, ItemUndef
-	}
-}
diff --git a/vendor/github.com/gobwas/httphead/octet.go b/vendor/github.com/gobwas/httphead/octet.go
deleted file mode 100644
index 2a04cdd..0000000
--- a/vendor/github.com/gobwas/httphead/octet.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package httphead
-
-// OctetType desribes character type.
-//
-// From the "Basic Rules" chapter of RFC2616
-// See https://tools.ietf.org/html/rfc2616#section-2.2
-//
-// OCTET          = <any 8-bit sequence of data>
-// CHAR           = <any US-ASCII character (octets 0 - 127)>
-// UPALPHA        = <any US-ASCII uppercase letter "A".."Z">
-// LOALPHA        = <any US-ASCII lowercase letter "a".."z">
-// ALPHA          = UPALPHA | LOALPHA
-// DIGIT          = <any US-ASCII digit "0".."9">
-// CTL            = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
-// CR             = <US-ASCII CR, carriage return (13)>
-// LF             = <US-ASCII LF, linefeed (10)>
-// SP             = <US-ASCII SP, space (32)>
-// HT             = <US-ASCII HT, horizontal-tab (9)>
-// <">            = <US-ASCII double-quote mark (34)>
-// CRLF           = CR LF
-// LWS            = [CRLF] 1*( SP | HT )
-//
-// Many HTTP/1.1 header field values consist of words separated by LWS
-// or special characters. These special characters MUST be in a quoted
-// string to be used within a parameter value (as defined in section
-// 3.6).
-//
-// token          = 1*<any CHAR except CTLs or separators>
-// separators     = "(" | ")" | "<" | ">" | "@"
-// | "," | ";" | ":" | "\" | <">
-// | "/" | "[" | "]" | "?" | "="
-// | "{" | "}" | SP | HT
-type OctetType byte
-
-// IsChar reports whether octet is CHAR.
-func (t OctetType) IsChar() bool { return t&octetChar != 0 }
-
-// IsControl reports whether octet is CTL.
-func (t OctetType) IsControl() bool { return t&octetControl != 0 }
-
-// IsSeparator reports whether octet is separator.
-func (t OctetType) IsSeparator() bool { return t&octetSeparator != 0 }
-
-// IsSpace reports whether octet is space (SP or HT).
-func (t OctetType) IsSpace() bool { return t&octetSpace != 0 }
-
-// IsToken reports whether octet is token.
-func (t OctetType) IsToken() bool { return t&octetToken != 0 }
-
-const (
-	octetChar OctetType = 1 << iota
-	octetControl
-	octetSpace
-	octetSeparator
-	octetToken
-)
-
-// OctetTypes is a table of octets.
-var OctetTypes [256]OctetType
-
-func init() {
-	for c := 32; c < 256; c++ {
-		var t OctetType
-		if c <= 127 {
-			t |= octetChar
-		}
-		if 0 <= c && c <= 31 || c == 127 {
-			t |= octetControl
-		}
-		switch c {
-		case '(', ')', '<', '>', '@', ',', ';', ':', '"', '/', '[', ']', '?', '=', '{', '}', '\\':
-			t |= octetSeparator
-		case ' ', '\t':
-			t |= octetSpace | octetSeparator
-		}
-
-		if t.IsChar() && !t.IsControl() && !t.IsSeparator() && !t.IsSpace() {
-			t |= octetToken
-		}
-
-		OctetTypes[c] = t
-	}
-}
diff --git a/vendor/github.com/gobwas/httphead/option.go b/vendor/github.com/gobwas/httphead/option.go
deleted file mode 100644
index 0a18c7c..0000000
--- a/vendor/github.com/gobwas/httphead/option.go
+++ /dev/null
@@ -1,193 +0,0 @@
-package httphead
-
-import (
-	"bytes"
-	"sort"
-)
-
-// Option represents a header option.
-type Option struct {
-	Name       []byte
-	Parameters Parameters
-}
-
-// Size returns number of bytes need to be allocated for use in opt.Copy.
-func (opt Option) Size() int {
-	return len(opt.Name) + opt.Parameters.bytes
-}
-
-// Copy copies all underlying []byte slices into p and returns new Option.
-// Note that p must be at least of opt.Size() length.
-func (opt Option) Copy(p []byte) Option {
-	n := copy(p, opt.Name)
-	opt.Name = p[:n]
-	opt.Parameters, p = opt.Parameters.Copy(p[n:])
-	return opt
-}
-
-// Clone is a shorthand for making slice of opt.Size() sequenced with Copy()
-// call.
-func (opt Option) Clone() Option {
-	return opt.Copy(make([]byte, opt.Size()))
-}
-
-// String represents option as a string.
-func (opt Option) String() string {
-	return "{" + string(opt.Name) + " " + opt.Parameters.String() + "}"
-}
-
-// NewOption creates named option with given parameters.
-func NewOption(name string, params map[string]string) Option {
-	p := Parameters{}
-	for k, v := range params {
-		p.Set([]byte(k), []byte(v))
-	}
-	return Option{
-		Name:       []byte(name),
-		Parameters: p,
-	}
-}
-
-// Equal reports whether option is equal to b.
-func (opt Option) Equal(b Option) bool {
-	if bytes.Equal(opt.Name, b.Name) {
-		return opt.Parameters.Equal(b.Parameters)
-	}
-	return false
-}
-
-// Parameters represents option's parameters.
-type Parameters struct {
-	pos   int
-	bytes int
-	arr   [8]pair
-	dyn   []pair
-}
-
-// Equal reports whether a equal to b.
-func (p Parameters) Equal(b Parameters) bool {
-	switch {
-	case p.dyn == nil && b.dyn == nil:
-	case p.dyn != nil && b.dyn != nil:
-	default:
-		return false
-	}
-
-	ad, bd := p.data(), b.data()
-	if len(ad) != len(bd) {
-		return false
-	}
-
-	sort.Sort(pairs(ad))
-	sort.Sort(pairs(bd))
-
-	for i := 0; i < len(ad); i++ {
-		av, bv := ad[i], bd[i]
-		if !bytes.Equal(av.key, bv.key) || !bytes.Equal(av.value, bv.value) {
-			return false
-		}
-	}
-	return true
-}
-
-// Size returns number of bytes that needed to copy p.
-func (p *Parameters) Size() int {
-	return p.bytes
-}
-
-// Copy copies all underlying []byte slices into dst and returns new
-// Parameters.
-// Note that dst must be at least of p.Size() length.
-func (p *Parameters) Copy(dst []byte) (Parameters, []byte) {
-	ret := Parameters{
-		pos:   p.pos,
-		bytes: p.bytes,
-	}
-	if p.dyn != nil {
-		ret.dyn = make([]pair, len(p.dyn))
-		for i, v := range p.dyn {
-			ret.dyn[i], dst = v.copy(dst)
-		}
-	} else {
-		for i, p := range p.arr {
-			ret.arr[i], dst = p.copy(dst)
-		}
-	}
-	return ret, dst
-}
-
-// Get returns value by key and flag about existence such value.
-func (p *Parameters) Get(key string) (value []byte, ok bool) {
-	for _, v := range p.data() {
-		if string(v.key) == key {
-			return v.value, true
-		}
-	}
-	return nil, false
-}
-
-// Set sets value by key.
-func (p *Parameters) Set(key, value []byte) {
-	p.bytes += len(key) + len(value)
-
-	if p.pos < len(p.arr) {
-		p.arr[p.pos] = pair{key, value}
-		p.pos++
-		return
-	}
-
-	if p.dyn == nil {
-		p.dyn = make([]pair, len(p.arr), len(p.arr)+1)
-		copy(p.dyn, p.arr[:])
-	}
-	p.dyn = append(p.dyn, pair{key, value})
-}
-
-// ForEach iterates over parameters key-value pairs and calls cb for each one.
-func (p *Parameters) ForEach(cb func(k, v []byte) bool) {
-	for _, v := range p.data() {
-		if !cb(v.key, v.value) {
-			break
-		}
-	}
-}
-
-// String represents parameters as a string.
-func (p *Parameters) String() (ret string) {
-	ret = "["
-	for i, v := range p.data() {
-		if i > 0 {
-			ret += " "
-		}
-		ret += string(v.key) + ":" + string(v.value)
-	}
-	return ret + "]"
-}
-
-func (p *Parameters) data() []pair {
-	if p.dyn != nil {
-		return p.dyn
-	}
-	return p.arr[:p.pos]
-}
-
-type pair struct {
-	key, value []byte
-}
-
-func (p pair) copy(dst []byte) (pair, []byte) {
-	n := copy(dst, p.key)
-	p.key = dst[:n]
-	m := n + copy(dst[n:], p.value)
-	p.value = dst[n:m]
-
-	dst = dst[m:]
-
-	return p, dst
-}
-
-type pairs []pair
-
-func (p pairs) Len() int           { return len(p) }
-func (p pairs) Less(a, b int) bool { return bytes.Compare(p[a].key, p[b].key) == -1 }
-func (p pairs) Swap(a, b int)      { p[a], p[b] = p[b], p[a] }
diff --git a/vendor/github.com/gobwas/httphead/writer.go b/vendor/github.com/gobwas/httphead/writer.go
deleted file mode 100644
index e5df3dd..0000000
--- a/vendor/github.com/gobwas/httphead/writer.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package httphead
-
-import "io"
-
-var (
-	comma     = []byte{','}
-	equality  = []byte{'='}
-	semicolon = []byte{';'}
-	quote     = []byte{'"'}
-	escape    = []byte{'\\'}
-)
-
-// WriteOptions write options list to the dest.
-// It uses the same form as {Scan,Parse}Options functions:
-// values = 1#value
-// value = token *( ";" param )
-// param = token [ "=" (token | quoted-string) ]
-//
-// It wraps valuse into the quoted-string sequence if it contains any
-// non-token characters.
-func WriteOptions(dest io.Writer, options []Option) (n int, err error) {
-	w := writer{w: dest}
-	for i, opt := range options {
-		if i > 0 {
-			w.write(comma)
-		}
-
-		writeTokenSanitized(&w, opt.Name)
-
-		for _, p := range opt.Parameters.data() {
-			w.write(semicolon)
-			writeTokenSanitized(&w, p.key)
-			if len(p.value) != 0 {
-				w.write(equality)
-				writeTokenSanitized(&w, p.value)
-			}
-		}
-	}
-	return w.result()
-}
-
-// writeTokenSanitized writes token as is or as quouted string if it contains
-// non-token characters.
-//
-// Note that is is not expects LWS sequnces be in s, cause LWS is used only as
-// header field continuation:
-// "A CRLF is allowed in the definition of TEXT only as part of a header field
-// continuation. It is expected that the folding LWS will be replaced with a
-// single SP before interpretation of the TEXT value."
-// See https://tools.ietf.org/html/rfc2616#section-2
-//
-// That is we sanitizing s for writing, so there could not be any header field
-// continuation.
-// That is any CRLF will be escaped as any other control characters not allowd in TEXT.
-func writeTokenSanitized(bw *writer, bts []byte) {
-	var qt bool
-	var pos int
-	for i := 0; i < len(bts); i++ {
-		c := bts[i]
-		if !OctetTypes[c].IsToken() && !qt {
-			qt = true
-			bw.write(quote)
-		}
-		if OctetTypes[c].IsControl() || c == '"' {
-			if !qt {
-				qt = true
-				bw.write(quote)
-			}
-			bw.write(bts[pos:i])
-			bw.write(escape)
-			bw.write(bts[i : i+1])
-			pos = i + 1
-		}
-	}
-	if !qt {
-		bw.write(bts)
-	} else {
-		bw.write(bts[pos:])
-		bw.write(quote)
-	}
-}
-
-type writer struct {
-	w   io.Writer
-	n   int
-	err error
-}
-
-func (w *writer) write(p []byte) {
-	if w.err != nil {
-		return
-	}
-	var n int
-	n, w.err = w.w.Write(p)
-	w.n += n
-	return
-}
-
-func (w *writer) result() (int, error) {
-	return w.n, w.err
-}
diff --git a/vendor/github.com/gobwas/pool/LICENSE b/vendor/github.com/gobwas/pool/LICENSE
deleted file mode 100644
index c41ffde..0000000
--- a/vendor/github.com/gobwas/pool/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2017-2019 Sergey Kamardin <gobwas@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/gobwas/pool/README.md b/vendor/github.com/gobwas/pool/README.md
deleted file mode 100644
index 4568558..0000000
--- a/vendor/github.com/gobwas/pool/README.md
+++ /dev/null
@@ -1,107 +0,0 @@
-# pool
-
-[![GoDoc][godoc-image]][godoc-url]
-
-> Tiny memory reuse helpers for Go.
-
-## generic
-
-Without use of subpackages, `pool` allows to reuse any struct distinguishable
-by size in generic way:
-
-```go
-package main
-
-import "github.com/gobwas/pool"
-
-func main() {
-	x, n := pool.Get(100) // Returns object with size 128 or nil.
-	if x == nil {
-		// Create x somehow with knowledge that n is 128.
-	}
-	defer pool.Put(x, n)
-	
-	// Work with x.
-}
-```
-
-Pool allows you to pass specific options for constructing custom pool:
-
-```go
-package main
-
-import "github.com/gobwas/pool"
-
-func main() {
-	p := pool.Custom(
-        pool.WithLogSizeMapping(),      // Will ceil size n passed to Get(n) to nearest power of two.
-        pool.WithLogSizeRange(64, 512), // Will reuse objects in logarithmic range [64, 512].
-        pool.WithSize(65536),           // Will reuse object with size 65536.
-    )
-	x, n := p.Get(1000)  // Returns nil and 1000 because mapped size 1000 => 1024 is not reusing by the pool.
-    defer pool.Put(x, n) // Will not reuse x.
-	
-	// Work with x.
-}
-```
-
-Note that there are few non-generic pooling implementations inside subpackages.
-
-## pbytes
-
-Subpackage `pbytes` is intended for `[]byte` reuse.
-
-```go
-package main
-
-import "github.com/gobwas/pool/pbytes"
-
-func main() {
-	bts := pbytes.GetCap(100) // Returns make([]byte, 0, 128).
-	defer pbytes.Put(bts)
-
-	// Work with bts.
-}
-```
-
-You can also create your own range for pooling:
-
-```go
-package main
-
-import "github.com/gobwas/pool/pbytes"
-
-func main() {
-	// Reuse only slices whose capacity is 128, 256, 512 or 1024.
-	pool := pbytes.New(128, 1024) 
-
-	bts := pool.GetCap(100) // Returns make([]byte, 0, 128).
-	defer pool.Put(bts)
-
-	// Work with bts.
-}
-```
-
-## pbufio
-
-Subpackage `pbufio` is intended for `*bufio.{Reader, Writer}` reuse.
-
-```go
-package main
-
-import "github.com/gobwas/pool/pbufio"
-
-func main() {
-	bw := pbufio.GetWriter(os.Stdout, 100) // Returns bufio.NewWriterSize(128).
-	defer pbufio.PutWriter(bw)
-
-	// Work with bw.
-}
-```
-
-Like with `pbytes`, you can also create pool with custom reuse bounds.
-
-
-
-[godoc-image]: https://godoc.org/github.com/gobwas/pool?status.svg
-[godoc-url]:   https://godoc.org/github.com/gobwas/pool
diff --git a/vendor/github.com/gobwas/pool/generic.go b/vendor/github.com/gobwas/pool/generic.go
deleted file mode 100644
index d40b362..0000000
--- a/vendor/github.com/gobwas/pool/generic.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package pool
-
-import (
-	"sync"
-
-	"github.com/gobwas/pool/internal/pmath"
-)
-
-var DefaultPool = New(128, 65536)
-
-// Get pulls object whose generic size is at least of given size. It also
-// returns a real size of x for further pass to Put(). It returns -1 as real
-// size for nil x. Size >-1 does not mean that x is non-nil, so checks must be
-// done.
-//
-// Note that size could be ceiled to the next power of two.
-//
-// Get is a wrapper around DefaultPool.Get().
-func Get(size int) (interface{}, int) { return DefaultPool.Get(size) }
-
-// Put takes x and its size for future reuse.
-// Put is a wrapper around DefaultPool.Put().
-func Put(x interface{}, size int) { DefaultPool.Put(x, size) }
-
-// Pool contains logic of reusing objects distinguishable by size in generic
-// way.
-type Pool struct {
-	pool map[int]*sync.Pool
-	size func(int) int
-}
-
-// New creates new Pool that reuses objects which size is in logarithmic range
-// [min, max].
-//
-// Note that it is a shortcut for Custom() constructor with Options provided by
-// WithLogSizeMapping() and WithLogSizeRange(min, max) calls.
-func New(min, max int) *Pool {
-	return Custom(
-		WithLogSizeMapping(),
-		WithLogSizeRange(min, max),
-	)
-}
-
-// Custom creates new Pool with given options.
-func Custom(opts ...Option) *Pool {
-	p := &Pool{
-		pool: make(map[int]*sync.Pool),
-		size: pmath.Identity,
-	}
-
-	c := (*poolConfig)(p)
-	for _, opt := range opts {
-		opt(c)
-	}
-
-	return p
-}
-
-// Get pulls object whose generic size is at least of given size.
-// It also returns a real size of x for further pass to Put() even if x is nil.
-// Note that size could be ceiled to the next power of two.
-func (p *Pool) Get(size int) (interface{}, int) {
-	n := p.size(size)
-	if pool := p.pool[n]; pool != nil {
-		return pool.Get(), n
-	}
-	return nil, size
-}
-
-// Put takes x and its size for future reuse.
-func (p *Pool) Put(x interface{}, size int) {
-	if pool := p.pool[size]; pool != nil {
-		pool.Put(x)
-	}
-}
-
-type poolConfig Pool
-
-// AddSize adds size n to the map.
-func (p *poolConfig) AddSize(n int) {
-	p.pool[n] = new(sync.Pool)
-}
-
-// SetSizeMapping sets up incoming size mapping function.
-func (p *poolConfig) SetSizeMapping(size func(int) int) {
-	p.size = size
-}
diff --git a/vendor/github.com/gobwas/pool/internal/pmath/pmath.go b/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
deleted file mode 100644
index df152ed..0000000
--- a/vendor/github.com/gobwas/pool/internal/pmath/pmath.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package pmath
-
-const (
-	bitsize       = 32 << (^uint(0) >> 63)
-	maxint        = int(1<<(bitsize-1) - 1)
-	maxintHeadBit = 1 << (bitsize - 2)
-)
-
-// LogarithmicRange iterates from ceiled to power of two min to max,
-// calling cb on each iteration.
-func LogarithmicRange(min, max int, cb func(int)) {
-	if min == 0 {
-		min = 1
-	}
-	for n := CeilToPowerOfTwo(min); n <= max; n <<= 1 {
-		cb(n)
-	}
-}
-
-// IsPowerOfTwo reports whether given integer is a power of two.
-func IsPowerOfTwo(n int) bool {
-	return n&(n-1) == 0
-}
-
-// Identity is identity.
-func Identity(n int) int {
-	return n
-}
-
-// CeilToPowerOfTwo returns the least power of two integer value greater than
-// or equal to n.
-func CeilToPowerOfTwo(n int) int {
-	if n&maxintHeadBit != 0 && n > maxintHeadBit {
-		panic("argument is too large")
-	}
-	if n <= 2 {
-		return n
-	}
-	n--
-	n = fillBits(n)
-	n++
-	return n
-}
-
-// FloorToPowerOfTwo returns the greatest power of two integer value less than
-// or equal to n.
-func FloorToPowerOfTwo(n int) int {
-	if n <= 2 {
-		return n
-	}
-	n = fillBits(n)
-	n >>= 1
-	n++
-	return n
-}
-
-func fillBits(n int) int {
-	n |= n >> 1
-	n |= n >> 2
-	n |= n >> 4
-	n |= n >> 8
-	n |= n >> 16
-	n |= n >> 32
-	return n
-}
diff --git a/vendor/github.com/gobwas/pool/option.go b/vendor/github.com/gobwas/pool/option.go
deleted file mode 100644
index d6e42b7..0000000
--- a/vendor/github.com/gobwas/pool/option.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package pool
-
-import "github.com/gobwas/pool/internal/pmath"
-
-// Option configures pool.
-type Option func(Config)
-
-// Config describes generic pool configuration.
-type Config interface {
-	AddSize(n int)
-	SetSizeMapping(func(int) int)
-}
-
-// WithSizeLogRange returns an Option that will add logarithmic range of
-// pooling sizes containing [min, max] values.
-func WithLogSizeRange(min, max int) Option {
-	return func(c Config) {
-		pmath.LogarithmicRange(min, max, func(n int) {
-			c.AddSize(n)
-		})
-	}
-}
-
-// WithSize returns an Option that will add given pooling size to the pool.
-func WithSize(n int) Option {
-	return func(c Config) {
-		c.AddSize(n)
-	}
-}
-
-func WithSizeMapping(sz func(int) int) Option {
-	return func(c Config) {
-		c.SetSizeMapping(sz)
-	}
-}
-
-func WithLogSizeMapping() Option {
-	return WithSizeMapping(pmath.CeilToPowerOfTwo)
-}
-
-func WithIdentitySizeMapping() Option {
-	return WithSizeMapping(pmath.Identity)
-}
diff --git a/vendor/github.com/gobwas/pool/pbufio/pbufio.go b/vendor/github.com/gobwas/pool/pbufio/pbufio.go
deleted file mode 100644
index d526bd8..0000000
--- a/vendor/github.com/gobwas/pool/pbufio/pbufio.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Package pbufio contains tools for pooling bufio.Reader and bufio.Writers.
-package pbufio
-
-import (
-	"bufio"
-	"io"
-
-	"github.com/gobwas/pool"
-)
-
-var (
-	DefaultWriterPool = NewWriterPool(256, 65536)
-	DefaultReaderPool = NewReaderPool(256, 65536)
-)
-
-// GetWriter returns bufio.Writer whose buffer has at least size bytes.
-// Note that size could be ceiled to the next power of two.
-// GetWriter is a wrapper around DefaultWriterPool.Get().
-func GetWriter(w io.Writer, size int) *bufio.Writer { return DefaultWriterPool.Get(w, size) }
-
-// PutWriter takes bufio.Writer for future reuse.
-// It does not reuse bufio.Writer which underlying buffer size is not power of
-// PutWriter is a wrapper around DefaultWriterPool.Put().
-func PutWriter(bw *bufio.Writer) { DefaultWriterPool.Put(bw) }
-
-// GetReader returns bufio.Reader whose buffer has at least size bytes. It returns
-// its capacity for further pass to Put().
-// Note that size could be ceiled to the next power of two.
-// GetReader is a wrapper around DefaultReaderPool.Get().
-func GetReader(w io.Reader, size int) *bufio.Reader { return DefaultReaderPool.Get(w, size) }
-
-// PutReader takes bufio.Reader and its size for future reuse.
-// It does not reuse bufio.Reader if size is not power of two or is out of pool
-// min/max range.
-// PutReader is a wrapper around DefaultReaderPool.Put().
-func PutReader(bw *bufio.Reader) { DefaultReaderPool.Put(bw) }
-
-// WriterPool contains logic of *bufio.Writer reuse with various size.
-type WriterPool struct {
-	pool *pool.Pool
-}
-
-// NewWriterPool creates new WriterPool that reuses writers which size is in
-// logarithmic range [min, max].
-func NewWriterPool(min, max int) *WriterPool {
-	return &WriterPool{pool.New(min, max)}
-}
-
-// CustomWriterPool creates new WriterPool with given options.
-func CustomWriterPool(opts ...pool.Option) *WriterPool {
-	return &WriterPool{pool.Custom(opts...)}
-}
-
-// Get returns bufio.Writer whose buffer has at least size bytes.
-func (wp *WriterPool) Get(w io.Writer, size int) *bufio.Writer {
-	v, n := wp.pool.Get(size)
-	if v != nil {
-		bw := v.(*bufio.Writer)
-		bw.Reset(w)
-		return bw
-	}
-	return bufio.NewWriterSize(w, n)
-}
-
-// Put takes ownership of bufio.Writer for further reuse.
-func (wp *WriterPool) Put(bw *bufio.Writer) {
-	// Should reset even if we do Reset() inside Get().
-	// This is done to prevent locking underlying io.Writer from GC.
-	bw.Reset(nil)
-	wp.pool.Put(bw, writerSize(bw))
-}
-
-// ReaderPool contains logic of *bufio.Reader reuse with various size.
-type ReaderPool struct {
-	pool *pool.Pool
-}
-
-// NewReaderPool creates new ReaderPool that reuses writers which size is in
-// logarithmic range [min, max].
-func NewReaderPool(min, max int) *ReaderPool {
-	return &ReaderPool{pool.New(min, max)}
-}
-
-// CustomReaderPool creates new ReaderPool with given options.
-func CustomReaderPool(opts ...pool.Option) *ReaderPool {
-	return &ReaderPool{pool.Custom(opts...)}
-}
-
-// Get returns bufio.Reader whose buffer has at least size bytes.
-func (rp *ReaderPool) Get(r io.Reader, size int) *bufio.Reader {
-	v, n := rp.pool.Get(size)
-	if v != nil {
-		br := v.(*bufio.Reader)
-		br.Reset(r)
-		return br
-	}
-	return bufio.NewReaderSize(r, n)
-}
-
-// Put takes ownership of bufio.Reader for further reuse.
-func (rp *ReaderPool) Put(br *bufio.Reader) {
-	// Should reset even if we do Reset() inside Get().
-	// This is done to prevent locking underlying io.Reader from GC.
-	br.Reset(nil)
-	rp.pool.Put(br, readerSize(br))
-}
diff --git a/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go b/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
deleted file mode 100644
index c736ae5..0000000
--- a/vendor/github.com/gobwas/pool/pbufio/pbufio_go110.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build go1.10
-
-package pbufio
-
-import "bufio"
-
-func writerSize(bw *bufio.Writer) int {
-	return bw.Size()
-}
-
-func readerSize(br *bufio.Reader) int {
-	return br.Size()
-}
diff --git a/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go b/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
deleted file mode 100644
index e71dd44..0000000
--- a/vendor/github.com/gobwas/pool/pbufio/pbufio_go19.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build !go1.10
-
-package pbufio
-
-import "bufio"
-
-func writerSize(bw *bufio.Writer) int {
-	return bw.Available() + bw.Buffered()
-}
-
-// readerSize returns buffer size of the given buffered reader.
-// NOTE: current workaround implementation resets underlying io.Reader.
-func readerSize(br *bufio.Reader) int {
-	br.Reset(sizeReader)
-	br.ReadByte()
-	n := br.Buffered() + 1
-	br.Reset(nil)
-	return n
-}
-
-var sizeReader optimisticReader
-
-type optimisticReader struct{}
-
-func (optimisticReader) Read(p []byte) (int, error) {
-	return len(p), nil
-}
diff --git a/vendor/github.com/gobwas/pool/pbytes/pbytes.go b/vendor/github.com/gobwas/pool/pbytes/pbytes.go
deleted file mode 100644
index 919705b..0000000
--- a/vendor/github.com/gobwas/pool/pbytes/pbytes.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Package pbytes contains tools for pooling byte pool.
-// Note that by default it reuse slices with capacity from 128 to 65536 bytes.
-package pbytes
-
-// DefaultPool is used by pacakge level functions.
-var DefaultPool = New(128, 65536)
-
-// Get returns probably reused slice of bytes with at least capacity of c and
-// exactly len of n.
-// Get is a wrapper around DefaultPool.Get().
-func Get(n, c int) []byte { return DefaultPool.Get(n, c) }
-
-// GetCap returns probably reused slice of bytes with at least capacity of n.
-// GetCap is a wrapper around DefaultPool.GetCap().
-func GetCap(c int) []byte { return DefaultPool.GetCap(c) }
-
-// GetLen returns probably reused slice of bytes with at least capacity of n
-// and exactly len of n.
-// GetLen is a wrapper around DefaultPool.GetLen().
-func GetLen(n int) []byte { return DefaultPool.GetLen(n) }
-
-// Put returns given slice to reuse pool.
-// Put is a wrapper around DefaultPool.Put().
-func Put(p []byte) { DefaultPool.Put(p) }
diff --git a/vendor/github.com/gobwas/pool/pbytes/pool.go b/vendor/github.com/gobwas/pool/pbytes/pool.go
deleted file mode 100644
index 1dde225..0000000
--- a/vendor/github.com/gobwas/pool/pbytes/pool.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// +build !pool_sanitize
-
-package pbytes
-
-import "github.com/gobwas/pool"
-
-// Pool contains logic of reusing byte slices of various size.
-type Pool struct {
-	pool *pool.Pool
-}
-
-// New creates new Pool that reuses slices which size is in logarithmic range
-// [min, max].
-//
-// Note that it is a shortcut for Custom() constructor with Options provided by
-// pool.WithLogSizeMapping() and pool.WithLogSizeRange(min, max) calls.
-func New(min, max int) *Pool {
-	return &Pool{pool.New(min, max)}
-}
-
-// New creates new Pool with given options.
-func Custom(opts ...pool.Option) *Pool {
-	return &Pool{pool.Custom(opts...)}
-}
-
-// Get returns probably reused slice of bytes with at least capacity of c and
-// exactly len of n.
-func (p *Pool) Get(n, c int) []byte {
-	if n > c {
-		panic("requested length is greater than capacity")
-	}
-
-	v, x := p.pool.Get(c)
-	if v != nil {
-		bts := v.([]byte)
-		bts = bts[:n]
-		return bts
-	}
-
-	return make([]byte, n, x)
-}
-
-// Put returns given slice to reuse pool.
-// It does not reuse bytes whose size is not power of two or is out of pool
-// min/max range.
-func (p *Pool) Put(bts []byte) {
-	p.pool.Put(bts, cap(bts))
-}
-
-// GetCap returns probably reused slice of bytes with at least capacity of n.
-func (p *Pool) GetCap(c int) []byte {
-	return p.Get(0, c)
-}
-
-// GetLen returns probably reused slice of bytes with at least capacity of n
-// and exactly len of n.
-func (p *Pool) GetLen(n int) []byte {
-	return p.Get(n, n)
-}
diff --git a/vendor/github.com/gobwas/pool/pbytes/pool_sanitize.go b/vendor/github.com/gobwas/pool/pbytes/pool_sanitize.go
deleted file mode 100644
index fae9af4..0000000
--- a/vendor/github.com/gobwas/pool/pbytes/pool_sanitize.go
+++ /dev/null
@@ -1,121 +0,0 @@
-// +build pool_sanitize
-
-package pbytes
-
-import (
-	"reflect"
-	"runtime"
-	"sync/atomic"
-	"syscall"
-	"unsafe"
-
-	"golang.org/x/sys/unix"
-)
-
-const magic = uint64(0x777742)
-
-type guard struct {
-	magic  uint64
-	size   int
-	owners int32
-}
-
-const guardSize = int(unsafe.Sizeof(guard{}))
-
-type Pool struct {
-	min, max int
-}
-
-func New(min, max int) *Pool {
-	return &Pool{min, max}
-}
-
-// Get returns probably reused slice of bytes with at least capacity of c and
-// exactly len of n.
-func (p *Pool) Get(n, c int) []byte {
-	if n > c {
-		panic("requested length is greater than capacity")
-	}
-
-	pageSize := syscall.Getpagesize()
-	pages := (c+guardSize)/pageSize + 1
-	size := pages * pageSize
-
-	bts := alloc(size)
-
-	g := (*guard)(unsafe.Pointer(&bts[0]))
-	*g = guard{
-		magic:  magic,
-		size:   size,
-		owners: 1,
-	}
-
-	return bts[guardSize : guardSize+n]
-}
-
-func (p *Pool) GetCap(c int) []byte { return p.Get(0, c) }
-func (p *Pool) GetLen(n int) []byte { return Get(n, n) }
-
-// Put returns given slice to reuse pool.
-func (p *Pool) Put(bts []byte) {
-	hdr := *(*reflect.SliceHeader)(unsafe.Pointer(&bts))
-	ptr := hdr.Data - uintptr(guardSize)
-
-	g := (*guard)(unsafe.Pointer(ptr))
-	if g.magic != magic {
-		panic("unknown slice returned to the pool")
-	}
-	if n := atomic.AddInt32(&g.owners, -1); n < 0 {
-		panic("multiple Put() detected")
-	}
-
-	// Disable read and write on bytes memory pages. This will cause panic on
-	// incorrect access to returned slice.
-	mprotect(ptr, false, false, g.size)
-
-	runtime.SetFinalizer(&bts, func(b *[]byte) {
-		mprotect(ptr, true, true, g.size)
-		free(*(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{
-			Data: ptr,
-			Len:  g.size,
-			Cap:  g.size,
-		})))
-	})
-}
-
-func alloc(n int) []byte {
-	b, err := unix.Mmap(-1, 0, n, unix.PROT_READ|unix.PROT_WRITE|unix.PROT_EXEC, unix.MAP_SHARED|unix.MAP_ANONYMOUS)
-	if err != nil {
-		panic(err.Error())
-	}
-	return b
-}
-
-func free(b []byte) {
-	if err := unix.Munmap(b); err != nil {
-		panic(err.Error())
-	}
-}
-
-func mprotect(ptr uintptr, r, w bool, size int) {
-	// Need to avoid "EINVAL addr is not a valid pointer,
-	// or not a multiple of PAGESIZE."
-	start := ptr & ^(uintptr(syscall.Getpagesize() - 1))
-
-	prot := uintptr(syscall.PROT_EXEC)
-	switch {
-	case r && w:
-		prot |= syscall.PROT_READ | syscall.PROT_WRITE
-	case r:
-		prot |= syscall.PROT_READ
-	case w:
-		prot |= syscall.PROT_WRITE
-	}
-
-	_, _, err := syscall.Syscall(syscall.SYS_MPROTECT,
-		start, uintptr(size), prot,
-	)
-	if err != 0 {
-		panic(err.Error())
-	}
-}
diff --git a/vendor/github.com/gobwas/pool/pool.go b/vendor/github.com/gobwas/pool/pool.go
deleted file mode 100644
index 1fe9e60..0000000
--- a/vendor/github.com/gobwas/pool/pool.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Package pool contains helpers for pooling structures distinguishable by
-// size.
-//
-// Quick example:
-//
-//   import "github.com/gobwas/pool"
-//
-//   func main() {
-//      // Reuse objects in logarithmic range from 0 to 64 (0,1,2,4,6,8,16,32,64).
-//      p := pool.New(0, 64)
-//
-//      buf, n := p.Get(10) // Returns buffer with 16 capacity.
-//      if buf == nil {
-//          buf = bytes.NewBuffer(make([]byte, n))
-//      }
-//      defer p.Put(buf, n)
-//
-//      // Work with buf.
-//   }
-//
-// There are non-generic implementations for pooling:
-// - pool/pbytes for []byte reuse;
-// - pool/pbufio for *bufio.Reader and *bufio.Writer reuse;
-//
-package pool
diff --git a/vendor/github.com/gobwas/ws/.gitignore b/vendor/github.com/gobwas/ws/.gitignore
deleted file mode 100644
index e3e2b10..0000000
--- a/vendor/github.com/gobwas/ws/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-bin/
-reports/
-cpu.out
-mem.out
-ws.test
diff --git a/vendor/github.com/gobwas/ws/LICENSE b/vendor/github.com/gobwas/ws/LICENSE
deleted file mode 100644
index ca6dfd9..0000000
--- a/vendor/github.com/gobwas/ws/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2017-2021 Sergey Kamardin <gobwas@gmail.com>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/gobwas/ws/Makefile b/vendor/github.com/gobwas/ws/Makefile
deleted file mode 100644
index 6d89f78..0000000
--- a/vendor/github.com/gobwas/ws/Makefile
+++ /dev/null
@@ -1,54 +0,0 @@
-BENCH     ?=.
-BENCH_BASE?=master
-
-clean:
-	rm -f bin/reporter
-	rm -fr autobahn/report/*
-
-bin/reporter:
-	go build -o bin/reporter ./autobahn
-
-bin/gocovmerge:
-	go build -o bin/gocovmerge github.com/wadey/gocovmerge
-
-.PHONY: autobahn
-autobahn: clean bin/reporter 
-	./autobahn/script/test.sh --build --follow-logs
-	bin/reporter $(PWD)/autobahn/report/index.json
-
-.PHONY: autobahn/report
-autobahn/report: bin/reporter
-	./bin/reporter -http localhost:5555 ./autobahn/report/index.json
-
-test:
-	go test -coverprofile=ws.coverage .
-	go test -coverprofile=wsutil.coverage ./wsutil
-	go test -coverprofile=wsfalte.coverage ./wsflate
-	# No statements to cover in ./tests (there are only tests).
-	go test ./tests
-
-cover: bin/gocovmerge test autobahn
-	bin/gocovmerge ws.coverage wsutil.coverage wsflate.coverage autobahn/report/server.coverage > total.coverage
-
-benchcmp: BENCH_BRANCH=$(shell git rev-parse --abbrev-ref HEAD)
-benchcmp: BENCH_OLD:=$(shell mktemp -t old.XXXX)
-benchcmp: BENCH_NEW:=$(shell mktemp -t new.XXXX)
-benchcmp:
-	if [ ! -z "$(shell git status -s)" ]; then\
-		echo "could not compare with $(BENCH_BASE) – found unstaged changes";\
-		exit 1;\
-	fi;\
-	if [ "$(BENCH_BRANCH)" == "$(BENCH_BASE)" ]; then\
-		echo "comparing the same branches";\
-		exit 1;\
-	fi;\
-	echo "benchmarking $(BENCH_BRANCH)...";\
-	go test -run=none -bench=$(BENCH) -benchmem > $(BENCH_NEW);\
-	echo "benchmarking $(BENCH_BASE)...";\
-	git checkout -q $(BENCH_BASE);\
-	go test -run=none -bench=$(BENCH) -benchmem > $(BENCH_OLD);\
-	git checkout -q $(BENCH_BRANCH);\
-	echo "\nresults:";\
-	echo "========\n";\
-	benchcmp $(BENCH_OLD) $(BENCH_NEW);\
-
diff --git a/vendor/github.com/gobwas/ws/README.md b/vendor/github.com/gobwas/ws/README.md
deleted file mode 100644
index 0bd0f6b..0000000
--- a/vendor/github.com/gobwas/ws/README.md
+++ /dev/null
@@ -1,541 +0,0 @@
-# ws
-
-[![GoDoc][godoc-image]][godoc-url]
-[![CI][ci-badge]][ci-url]
-
-> [RFC6455][rfc-url] WebSocket implementation in Go.
-
-# Features
-
-- Zero-copy upgrade
-- No intermediate allocations during I/O
-- Low-level API which allows to build your own logic of packet handling and
-  buffers reuse
-- High-level wrappers and helpers around API in `wsutil` package, which allow
-  to start fast without digging the protocol internals
-
-# Documentation
-
-[GoDoc][godoc-url].
-
-# Why
-
-Existing WebSocket implementations do not allow users to reuse I/O buffers
-between connections in clear way. This library aims to export efficient
-low-level interface for working with the protocol without forcing only one way
-it could be used.
-
-By the way, if you want get the higher-level tools, you can use `wsutil`
-package.
-
-# Status
-
-Library is tagged as `v1*` so its API must not be broken during some
-improvements or refactoring.
-
-This implementation of RFC6455 passes [Autobahn Test
-Suite](https://github.com/crossbario/autobahn-testsuite) and currently has
-about 78% coverage.
-
-# Examples
-
-Example applications using `ws` are developed in separate repository
-[ws-examples](https://github.com/gobwas/ws-examples).
-
-# Usage
-
-The higher-level example of WebSocket echo server:
-
-```go
-package main
-
-import (
-	"net/http"
-
-	"github.com/gobwas/ws"
-	"github.com/gobwas/ws/wsutil"
-)
-
-func main() {
-	http.ListenAndServe(":8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		conn, _, _, err := ws.UpgradeHTTP(r, w)
-		if err != nil {
-			// handle error
-		}
-		go func() {
-			defer conn.Close()
-
-			for {
-				msg, op, err := wsutil.ReadClientData(conn)
-				if err != nil {
-					// handle error
-				}
-				err = wsutil.WriteServerMessage(conn, op, msg)
-				if err != nil {
-					// handle error
-				}
-			}
-		}()
-	}))
-}
-```
-
-Lower-level, but still high-level example:
-
-
-```go
-import (
-	"net/http"
-	"io"
-
-	"github.com/gobwas/ws"
-	"github.com/gobwas/ws/wsutil"
-)
-
-func main() {
-	http.ListenAndServe(":8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		conn, _, _, err := ws.UpgradeHTTP(r, w)
-		if err != nil {
-			// handle error
-		}
-		go func() {
-			defer conn.Close()
-
-			var (
-				state  = ws.StateServerSide
-				reader = wsutil.NewReader(conn, state)
-				writer = wsutil.NewWriter(conn, state, ws.OpText)
-			)
-			for {
-				header, err := reader.NextFrame()
-				if err != nil {
-					// handle error
-				}
-
-				// Reset writer to write frame with right operation code.
-				writer.Reset(conn, state, header.OpCode)
-
-				if _, err = io.Copy(writer, reader); err != nil {
-					// handle error
-				}
-				if err = writer.Flush(); err != nil {
-					// handle error
-				}
-			}
-		}()
-	}))
-}
-```
-
-We can apply the same pattern to read and write structured responses through a JSON encoder and decoder.:
-
-```go
-	...
-	var (
-		r = wsutil.NewReader(conn, ws.StateServerSide)
-		w = wsutil.NewWriter(conn, ws.StateServerSide, ws.OpText)
-		decoder = json.NewDecoder(r)
-		encoder = json.NewEncoder(w)
-	)
-	for {
-		hdr, err = r.NextFrame()
-		if err != nil {
-			return err
-		}
-		if hdr.OpCode == ws.OpClose {
-			return io.EOF
-		}
-		var req Request
-		if err := decoder.Decode(&req); err != nil {
-			return err
-		}
-		var resp Response
-		if err := encoder.Encode(&resp); err != nil {
-			return err
-		}
-		if err = w.Flush(); err != nil {
-			return err
-		}
-	}
-	...
-```
-
-The lower-level example without `wsutil`:
-
-```go
-package main
-
-import (
-	"net"
-	"io"
-
-	"github.com/gobwas/ws"
-)
-
-func main() {
-	ln, err := net.Listen("tcp", "localhost:8080")
-	if err != nil {
-		log.Fatal(err)
-	}
-
-	for {
-		conn, err := ln.Accept()
-		if err != nil {
-			// handle error
-		}
-		_, err = ws.Upgrade(conn)
-		if err != nil {
-			// handle error
-		}
-
-		go func() {
-			defer conn.Close()
-
-			for {
-				header, err := ws.ReadHeader(conn)
-				if err != nil {
-					// handle error
-				}
-
-				payload := make([]byte, header.Length)
-				_, err = io.ReadFull(conn, payload)
-				if err != nil {
-					// handle error
-				}
-				if header.Masked {
-					ws.Cipher(payload, header.Mask, 0)
-				}
-
-				// Reset the Masked flag, server frames must not be masked as
-				// RFC6455 says.
-				header.Masked = false
-
-				if err := ws.WriteHeader(conn, header); err != nil {
-					// handle error
-				}
-				if _, err := conn.Write(payload); err != nil {
-					// handle error
-				}
-
-				if header.OpCode == ws.OpClose {
-					return
-				}
-			}
-		}()
-	}
-}
-```
-
-# Zero-copy upgrade
-
-Zero-copy upgrade helps to avoid unnecessary allocations and copying while
-handling HTTP Upgrade request.
-
-Processing of all non-websocket headers is made in place with use of registered
-user callbacks whose arguments are only valid until callback returns.
-
-The simple example looks like this:
-
-```go
-package main
-
-import (
-	"net"
-	"log"
-
-	"github.com/gobwas/ws"
-)
-
-func main() {
-	ln, err := net.Listen("tcp", "localhost:8080")
-	if err != nil {
-		log.Fatal(err)
-	}
-	u := ws.Upgrader{
-		OnHeader: func(key, value []byte) (err error) {
-			log.Printf("non-websocket header: %q=%q", key, value)
-			return
-		},
-	}
-	for {
-		conn, err := ln.Accept()
-		if err != nil {
-			// handle error
-		}
-
-		_, err = u.Upgrade(conn)
-		if err != nil {
-			// handle error
-		}
-	}
-}
-```
-
-Usage of `ws.Upgrader` here brings ability to control incoming connections on
-tcp level and simply not to accept them by some logic.
-
-Zero-copy upgrade is for high-load services which have to control many
-resources such as connections buffers.
-
-The real life example could be like this:
-
-```go
-package main
-
-import (
-	"fmt"
-	"io"
-	"log"
-	"net"
-	"net/http"
-	"runtime"
-
-	"github.com/gobwas/httphead"
-	"github.com/gobwas/ws"
-)
-
-func main() {
-	ln, err := net.Listen("tcp", "localhost:8080")
-	if err != nil {
-		// handle error
-	}
-
-	// Prepare handshake header writer from http.Header mapping.
-	header := ws.HandshakeHeaderHTTP(http.Header{
-		"X-Go-Version": []string{runtime.Version()},
-	})
-
-	u := ws.Upgrader{
-		OnHost: func(host []byte) error {
-			if string(host) == "github.com" {
-				return nil
-			}
-			return ws.RejectConnectionError(
-				ws.RejectionStatus(403),
-				ws.RejectionHeader(ws.HandshakeHeaderString(
-					"X-Want-Host: github.com\r\n",
-				)),
-			)
-		},
-		OnHeader: func(key, value []byte) error {
-			if string(key) != "Cookie" {
-				return nil
-			}
-			ok := httphead.ScanCookie(value, func(key, value []byte) bool {
-				// Check session here or do some other stuff with cookies.
-				// Maybe copy some values for future use.
-				return true
-			})
-			if ok {
-				return nil
-			}
-			return ws.RejectConnectionError(
-				ws.RejectionReason("bad cookie"),
-				ws.RejectionStatus(400),
-			)
-		},
-		OnBeforeUpgrade: func() (ws.HandshakeHeader, error) {
-			return header, nil
-		},
-	}
-	for {
-		conn, err := ln.Accept()
-		if err != nil {
-			log.Fatal(err)
-		}
-		_, err = u.Upgrade(conn)
-		if err != nil {
-			log.Printf("upgrade error: %s", err)
-		}
-	}
-}
-```
-
-# Compression
-
-There is a `ws/wsflate` package to support [Permessage-Deflate Compression
-Extension][rfc-pmce].
-
-It provides minimalistic I/O wrappers to be used in conjunction with any
-deflate implementation (for example, the standard library's
-[compress/flate][compress/flate]).
-
-It is also compatible with `wsutil`'s reader and writer by providing
-`wsflate.MessageState` type, which implements `wsutil.SendExtension` and
-`wsutil.RecvExtension` interfaces.
-
-```go
-package main
-
-import (
-	"bytes"
-	"log"
-	"net"
-
-	"github.com/gobwas/ws"
-	"github.com/gobwas/ws/wsflate"
-)
-
-func main() {
-	ln, err := net.Listen("tcp", "localhost:8080")
-	if err != nil {
-		// handle error
-	}
-	e := wsflate.Extension{
-		// We are using default parameters here since we use
-		// wsflate.{Compress,Decompress}Frame helpers below in the code.
-		// This assumes that we use standard compress/flate package as flate
-		// implementation.
-		Parameters: wsflate.DefaultParameters,
-	}
-	u := ws.Upgrader{
-		Negotiate: e.Negotiate,
-	}
-	for {
-		conn, err := ln.Accept()
-		if err != nil {
-			log.Fatal(err)
-		}
-
-		// Reset extension after previous upgrades.
-		e.Reset()
-
-		_, err = u.Upgrade(conn)
-		if err != nil {
-			log.Printf("upgrade error: %s", err)
-			continue
-		}
-		if _, ok := e.Accepted(); !ok {
-			log.Printf("didn't negotiate compression for %s", conn.RemoteAddr())
-			conn.Close()
-			continue
-		}
-
-		go func() {
-			defer conn.Close()
-			for {
-				frame, err := ws.ReadFrame(conn)
-				if err != nil {
-					// Handle error.
-					return
-				}
-
-				frame = ws.UnmaskFrameInPlace(frame)
-
-				if wsflate.IsCompressed(frame.Header) {
-					// Note that even after successful negotiation of
-					// compression extension, both sides are able to send
-					// non-compressed messages.
-					frame, err = wsflate.DecompressFrame(frame)
-					if err != nil {
-						// Handle error.
-						return
-					}
-				}
-
-				// Do something with frame...
-
-				ack := ws.NewTextFrame([]byte("this is an acknowledgement"))
-
-				// Compress response unconditionally.
-				ack, err = wsflate.CompressFrame(ack)
-				if err != nil {
-					// Handle error.
-					return
-				}
-				if err = ws.WriteFrame(conn, ack); err != nil {
-					// Handle error.
-					return
-				}
-			}
-		}()
-	}
-}
-```
-
-You can use compression with `wsutil` package this way:
-
-```go
-	// Upgrade somehow and negotiate compression to get the conn...
-
-	// Initialize flate reader. We are using nil as a source io.Reader because
-	// we will Reset() it in the message i/o loop below.
-	fr := wsflate.NewReader(nil, func(r io.Reader) wsflate.Decompressor {
-		return flate.NewReader(r)
-	})
-	// Initialize flate writer. We are using nil as a destination io.Writer
-	// because we will Reset() it in the message i/o loop below.
-	fw := wsflate.NewWriter(nil, func(w io.Writer) wsflate.Compressor {
-		f, _ := flate.NewWriter(w, 9)
-		return f
-	})
-
-	// Declare compression message state variable.
-	//
-	// It has two goals:
-	// - Allow users to check whether received message is compressed or not.
-	// - Help wsutil.Reader and wsutil.Writer to set/unset appropriate
-	//   WebSocket header bits while writing next frame to the wire (it
-	//   implements wsutil.RecvExtension and wsutil.SendExtension).
-	var msg wsflate.MessageState
-
-	// Initialize WebSocket reader as previously. 
-	// Please note the use of Reader.Extensions field as well as
-	// of ws.StateExtended flag.
-	rd := &wsutil.Reader{
-		Source:     conn,
-		State:      ws.StateServerSide | ws.StateExtended,
-		Extensions: []wsutil.RecvExtension{
-			&msg, 
-		},
-	}
-
-	// Initialize WebSocket writer with ws.StateExtended flag as well.
-	wr := wsutil.NewWriter(conn, ws.StateServerSide|ws.StateExtended, 0)
-	// Use the message state as wsutil.SendExtension.
-	wr.SetExtensions(&msg)
-
-	for {
-		h, err := rd.NextFrame()
-		if err != nil {
-			// handle error.
-		}
-		if h.OpCode.IsControl() {
-			// handle control frame.
-		}
-		if !msg.IsCompressed() {
-			// handle uncompressed frame (skipped for the sake of example
-			// simplicity).
-		}
-
-		// Reset the writer to echo same op code.
-		wr.Reset(h.OpCode)
-
-		// Reset both flate reader and writer to start the new round of i/o.
-		fr.Reset(rd)
-		fw.Reset(wr)
-
-		// Copy whole message from reader to writer decompressing it and
-		// compressing again.
-		if _, err := io.Copy(fw, fr); err != nil {
-			// handle error.
-		}
-		// Flush any remaining buffers from flate writer to WebSocket writer.
-		if err := fw.Close(); err != nil {
-			// handle error.
-		}
-		// Flush the whole WebSocket message to the wire.
-		if err := wr.Flush(); err != nil {
-			// handle error.
-		}
-	}
-```
-
-
-[rfc-url]: https://tools.ietf.org/html/rfc6455
-[rfc-pmce]: https://tools.ietf.org/html/rfc7692#section-7
-[godoc-image]: https://godoc.org/github.com/gobwas/ws?status.svg
-[godoc-url]: https://godoc.org/github.com/gobwas/ws
-[compress/flate]: https://golang.org/pkg/compress/flate/
-[ci-badge]:    https://github.com/gobwas/ws/workflows/CI/badge.svg
-[ci-url]:      https://github.com/gobwas/ws/actions?query=workflow%3ACI
diff --git a/vendor/github.com/gobwas/ws/check.go b/vendor/github.com/gobwas/ws/check.go
deleted file mode 100644
index 8aa0df8..0000000
--- a/vendor/github.com/gobwas/ws/check.go
+++ /dev/null
@@ -1,145 +0,0 @@
-package ws
-
-import "unicode/utf8"
-
-// State represents state of websocket endpoint.
-// It used by some functions to be more strict when checking compatibility with RFC6455.
-type State uint8
-
-const (
-	// StateServerSide means that endpoint (caller) is a server.
-	StateServerSide State = 0x1 << iota
-	// StateClientSide means that endpoint (caller) is a client.
-	StateClientSide
-	// StateExtended means that extension was negotiated during handshake.
-	StateExtended
-	// StateFragmented means that endpoint (caller) has received fragmented
-	// frame and waits for continuation parts.
-	StateFragmented
-)
-
-// Is checks whether the s has v enabled.
-func (s State) Is(v State) bool {
-	return uint8(s)&uint8(v) != 0
-}
-
-// Set enables v state on s.
-func (s State) Set(v State) State {
-	return s | v
-}
-
-// Clear disables v state on s.
-func (s State) Clear(v State) State {
-	return s & (^v)
-}
-
-// ServerSide reports whether states represents server side.
-func (s State) ServerSide() bool { return s.Is(StateServerSide) }
-
-// ClientSide reports whether state represents client side.
-func (s State) ClientSide() bool { return s.Is(StateClientSide) }
-
-// Extended reports whether state is extended.
-func (s State) Extended() bool { return s.Is(StateExtended) }
-
-// Fragmented reports whether state is fragmented.
-func (s State) Fragmented() bool { return s.Is(StateFragmented) }
-
-// ProtocolError describes error during checking/parsing websocket frames or
-// headers.
-type ProtocolError string
-
-// Error implements error interface.
-func (p ProtocolError) Error() string { return string(p) }
-
-// Errors used by the protocol checkers.
-var (
-	ErrProtocolOpCodeReserved             = ProtocolError("use of reserved op code")
-	ErrProtocolControlPayloadOverflow     = ProtocolError("control frame payload limit exceeded")
-	ErrProtocolControlNotFinal            = ProtocolError("control frame is not final")
-	ErrProtocolNonZeroRsv                 = ProtocolError("non-zero rsv bits with no extension negotiated")
-	ErrProtocolMaskRequired               = ProtocolError("frames from client to server must be masked")
-	ErrProtocolMaskUnexpected             = ProtocolError("frames from server to client must be not masked")
-	ErrProtocolContinuationExpected       = ProtocolError("unexpected non-continuation data frame")
-	ErrProtocolContinuationUnexpected     = ProtocolError("unexpected continuation data frame")
-	ErrProtocolStatusCodeNotInUse         = ProtocolError("status code is not in use")
-	ErrProtocolStatusCodeApplicationLevel = ProtocolError("status code is only application level")
-	ErrProtocolStatusCodeNoMeaning        = ProtocolError("status code has no meaning yet")
-	ErrProtocolStatusCodeUnknown          = ProtocolError("status code is not defined in spec")
-	ErrProtocolInvalidUTF8                = ProtocolError("invalid utf8 sequence in close reason")
-)
-
-// CheckHeader checks h to contain valid header data for given state s.
-//
-// Note that zero state (0) means that state is clean,
-// neither server or client side, nor fragmented, nor extended.
-func CheckHeader(h Header, s State) error {
-	if h.OpCode.IsReserved() {
-		return ErrProtocolOpCodeReserved
-	}
-	if h.OpCode.IsControl() {
-		if h.Length > MaxControlFramePayloadSize {
-			return ErrProtocolControlPayloadOverflow
-		}
-		if !h.Fin {
-			return ErrProtocolControlNotFinal
-		}
-	}
-
-	switch {
-	// [RFC6455]: MUST be 0 unless an extension is negotiated that defines meanings for
-	// non-zero values. If a nonzero value is received and none of the
-	// negotiated extensions defines the meaning of such a nonzero value, the
-	// receiving endpoint MUST _Fail the WebSocket Connection_.
-	case h.Rsv != 0 && !s.Extended():
-		return ErrProtocolNonZeroRsv
-
-	// [RFC6455]: The server MUST close the connection upon receiving a frame that is not masked.
-	// In this case, a server MAY send a Close frame with a status code of 1002 (protocol error)
-	// as defined in Section 7.4.1. A server MUST NOT mask any frames that it sends to the client.
-	// A client MUST close a connection if it detects a masked frame. In this case, it MAY use the
-	// status code 1002 (protocol error) as defined in Section 7.4.1.
-	case s.ServerSide() && !h.Masked:
-		return ErrProtocolMaskRequired
-	case s.ClientSide() && h.Masked:
-		return ErrProtocolMaskUnexpected
-
-	// [RFC6455]: See detailed explanation in 5.4 section.
-	case s.Fragmented() && !h.OpCode.IsControl() && h.OpCode != OpContinuation:
-		return ErrProtocolContinuationExpected
-	case !s.Fragmented() && h.OpCode == OpContinuation:
-		return ErrProtocolContinuationUnexpected
-
-	default:
-		return nil
-	}
-}
-
-// CheckCloseFrameData checks received close information
-// to be valid RFC6455 compatible close info.
-//
-// Note that code.Empty() or code.IsAppLevel() will raise error.
-//
-// If endpoint sends close frame without status code (with frame.Length = 0),
-// application should not check its payload.
-func CheckCloseFrameData(code StatusCode, reason string) error {
-	switch {
-	case code.IsNotUsed():
-		return ErrProtocolStatusCodeNotInUse
-
-	case code.IsProtocolReserved():
-		return ErrProtocolStatusCodeApplicationLevel
-
-	case code == StatusNoMeaningYet:
-		return ErrProtocolStatusCodeNoMeaning
-
-	case code.IsProtocolSpec() && !code.IsProtocolDefined():
-		return ErrProtocolStatusCodeUnknown
-
-	case !utf8.ValidString(reason):
-		return ErrProtocolInvalidUTF8
-
-	default:
-		return nil
-	}
-}
diff --git a/vendor/github.com/gobwas/ws/cipher.go b/vendor/github.com/gobwas/ws/cipher.go
deleted file mode 100644
index ffe4161..0000000
--- a/vendor/github.com/gobwas/ws/cipher.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package ws
-
-import (
-	"encoding/binary"
-)
-
-// Cipher applies XOR cipher to the payload using mask.
-// Offset is used to cipher chunked data (e.g. in io.Reader implementations).
-//
-// To convert masked data into unmasked data, or vice versa, the following
-// algorithm is applied.  The same algorithm applies regardless of the
-// direction of the translation, e.g., the same steps are applied to
-// mask the data as to unmask the data.
-func Cipher(payload []byte, mask [4]byte, offset int) {
-	n := len(payload)
-	if n < 8 {
-		for i := 0; i < n; i++ {
-			payload[i] ^= mask[(offset+i)%4]
-		}
-		return
-	}
-
-	// Calculate position in mask due to previously processed bytes number.
-	mpos := offset % 4
-	// Count number of bytes will processed one by one from the beginning of payload.
-	ln := remain[mpos]
-	// Count number of bytes will processed one by one from the end of payload.
-	// This is done to process payload by 16 bytes in each iteration of main loop.
-	rn := (n - ln) % 16
-
-	for i := 0; i < ln; i++ {
-		payload[i] ^= mask[(mpos+i)%4]
-	}
-	for i := n - rn; i < n; i++ {
-		payload[i] ^= mask[(mpos+i)%4]
-	}
-
-	// NOTE: we use here binary.LittleEndian regardless of what is real
-	// endianness on machine is. To do so, we have to use binary.LittleEndian in
-	// the masking loop below as well.
-	var (
-		m  = binary.LittleEndian.Uint32(mask[:])
-		m2 = uint64(m)<<32 | uint64(m)
-	)
-	// Skip already processed right part.
-	// Get number of uint64 parts remaining to process.
-	n = (n - ln - rn) >> 4
-	j := ln
-	for i := 0; i < n; i++ {
-		chunk := payload[j : j+16]
-		p := binary.LittleEndian.Uint64(chunk) ^ m2
-		p2 := binary.LittleEndian.Uint64(chunk[8:]) ^ m2
-		binary.LittleEndian.PutUint64(chunk, p)
-		binary.LittleEndian.PutUint64(chunk[8:], p2)
-		j += 16
-	}
-}
-
-// remain maps position in masking key [0,4) to number
-// of bytes that need to be processed manually inside Cipher().
-var remain = [4]int{0, 3, 2, 1}
diff --git a/vendor/github.com/gobwas/ws/dialer.go b/vendor/github.com/gobwas/ws/dialer.go
deleted file mode 100644
index e66678e..0000000
--- a/vendor/github.com/gobwas/ws/dialer.go
+++ /dev/null
@@ -1,573 +0,0 @@
-package ws
-
-import (
-	"bufio"
-	"bytes"
-	"context"
-	"crypto/tls"
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-	"net/url"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/gobwas/httphead"
-	"github.com/gobwas/pool/pbufio"
-)
-
-// Constants used by Dialer.
-const (
-	DefaultClientReadBufferSize  = 4096
-	DefaultClientWriteBufferSize = 4096
-)
-
-// Handshake represents handshake result.
-type Handshake struct {
-	// Protocol is the subprotocol selected during handshake.
-	Protocol string
-
-	// Extensions is the list of negotiated extensions.
-	Extensions []httphead.Option
-}
-
-// Errors used by the websocket client.
-var (
-	ErrHandshakeBadStatus      = fmt.Errorf("unexpected http status")
-	ErrHandshakeBadSubProtocol = fmt.Errorf("unexpected protocol in %q header", headerSecProtocol)
-	ErrHandshakeBadExtensions  = fmt.Errorf("unexpected extensions in %q header", headerSecProtocol)
-)
-
-// DefaultDialer is dialer that holds no options and is used by Dial function.
-var DefaultDialer Dialer
-
-// Dial is like Dialer{}.Dial().
-func Dial(ctx context.Context, urlstr string) (net.Conn, *bufio.Reader, Handshake, error) {
-	return DefaultDialer.Dial(ctx, urlstr)
-}
-
-// Dialer contains options for establishing websocket connection to an url.
-type Dialer struct {
-	// ReadBufferSize and WriteBufferSize is an I/O buffer sizes.
-	// They used to read and write http data while upgrading to WebSocket.
-	// Allocated buffers are pooled with sync.Pool to avoid extra allocations.
-	//
-	// If a size is zero then default value is used.
-	ReadBufferSize, WriteBufferSize int
-
-	// Timeout is the maximum amount of time a Dial() will wait for a connect
-	// and an handshake to complete.
-	//
-	// The default is no timeout.
-	Timeout time.Duration
-
-	// Protocols is the list of subprotocols that the client wants to speak,
-	// ordered by preference.
-	//
-	// See https://tools.ietf.org/html/rfc6455#section-4.1
-	Protocols []string
-
-	// Extensions is the list of extensions that client wants to speak.
-	//
-	// Note that if server decides to use some of this extensions, Dial() will
-	// return Handshake struct containing a slice of items, which are the
-	// shallow copies of the items from this list. That is, internals of
-	// Extensions items are shared during Dial().
-	//
-	// See https://tools.ietf.org/html/rfc6455#section-4.1
-	// See https://tools.ietf.org/html/rfc6455#section-9.1
-	Extensions []httphead.Option
-
-	// Header is an optional HandshakeHeader instance that could be used to
-	// write additional headers to the handshake request.
-	//
-	// It used instead of any key-value mappings to avoid allocations in user
-	// land.
-	Header HandshakeHeader
-
-	// Host is an optional string that could be used to specify the host during
-	// HTTP upgrade request by setting 'Host' header.
-	//
-	// Default value is an empty string, which results in setting 'Host' header
-	// equal to the URL hostname given to Dialer.Dial().
-	Host string
-
-	// OnStatusError is the callback that will be called after receiving non
-	// "101 Continue" HTTP response status. It receives an io.Reader object
-	// representing server response bytes. That is, it gives ability to parse
-	// HTTP response somehow (probably with http.ReadResponse call) and make a
-	// decision of further logic.
-	//
-	// The arguments are only valid until the callback returns.
-	OnStatusError func(status int, reason []byte, resp io.Reader)
-
-	// OnHeader is the callback that will be called after successful parsing of
-	// header, that is not used during WebSocket handshake procedure. That is,
-	// it will be called with non-websocket headers, which could be relevant
-	// for application-level logic.
-	//
-	// The arguments are only valid until the callback returns.
-	//
-	// Returned value could be used to prevent processing response.
-	OnHeader func(key, value []byte) (err error)
-
-	// NetDial is the function that is used to get plain tcp connection.
-	// If it is not nil, then it is used instead of net.Dialer.
-	NetDial func(ctx context.Context, network, addr string) (net.Conn, error)
-
-	// TLSClient is the callback that will be called after successful dial with
-	// received connection and its remote host name. If it is nil, then the
-	// default tls.Client() will be used.
-	// If it is not nil, then TLSConfig field is ignored.
-	TLSClient func(conn net.Conn, hostname string) net.Conn
-
-	// TLSConfig is passed to tls.Client() to start TLS over established
-	// connection. If TLSClient is not nil, then it is ignored. If TLSConfig is
-	// non-nil and its ServerName is empty, then for every Dial() it will be
-	// cloned and appropriate ServerName will be set.
-	TLSConfig *tls.Config
-
-	// WrapConn is the optional callback that will be called when connection is
-	// ready for an i/o. That is, it will be called after successful dial and
-	// TLS initialization (for "wss" schemes). It may be helpful for different
-	// user land purposes such as end to end encryption.
-	//
-	// Note that for debugging purposes of an http handshake (e.g. sent request
-	// and received response), there is an wsutil.DebugDialer struct.
-	WrapConn func(conn net.Conn) net.Conn
-}
-
-// Dial connects to the url host and upgrades connection to WebSocket.
-//
-// If server has sent frames right after successful handshake then returned
-// buffer will be non-nil. In other cases buffer is always nil. For better
-// memory efficiency received non-nil bufio.Reader should be returned to the
-// inner pool with PutReader() function after use.
-//
-// Note that Dialer does not implement IDNA (RFC5895) logic as net/http does.
-// If you want to dial non-ascii host name, take care of its name serialization
-// avoiding bad request issues. For more info see net/http Request.Write()
-// implementation, especially cleanHost() function.
-func (d Dialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *bufio.Reader, hs Handshake, err error) {
-	u, err := url.ParseRequestURI(urlstr)
-	if err != nil {
-		return nil, nil, hs, err
-	}
-
-	// Prepare context to dial with. Initially it is the same as original, but
-	// if d.Timeout is non-zero and points to time that is before ctx.Deadline,
-	// we use more shorter context for dial.
-	dialctx := ctx
-
-	var deadline time.Time
-	if t := d.Timeout; t != 0 {
-		deadline = time.Now().Add(t)
-		if d, ok := ctx.Deadline(); !ok || deadline.Before(d) {
-			var cancel context.CancelFunc
-			dialctx, cancel = context.WithDeadline(ctx, deadline)
-			defer cancel()
-		}
-	}
-	if conn, err = d.dial(dialctx, u); err != nil {
-		return conn, nil, hs, err
-	}
-	defer func() {
-		if err != nil {
-			conn.Close()
-		}
-	}()
-	if ctx == context.Background() {
-		// No need to start I/O interrupter goroutine which is not zero-cost.
-		conn.SetDeadline(deadline)
-		defer conn.SetDeadline(noDeadline)
-	} else {
-		// Context could be canceled or its deadline could be exceeded.
-		// Start the interrupter goroutine to handle context cancelation.
-		done := setupContextDeadliner(ctx, conn)
-		defer func() {
-			// Map Upgrade() error to a possible context expiration error. That
-			// is, even if Upgrade() err is nil, context could be already
-			// expired and connection be "poisoned" by SetDeadline() call.
-			// In that case we must not return ctx.Err() error.
-			done(&err)
-		}()
-	}
-
-	br, hs, err = d.Upgrade(conn, u)
-
-	return conn, br, hs, err
-}
-
-var (
-	// netEmptyDialer is a net.Dialer without options, used in Dialer.dial() if
-	// Dialer.NetDial is not provided.
-	netEmptyDialer net.Dialer
-	// tlsEmptyConfig is an empty tls.Config used as default one.
-	tlsEmptyConfig tls.Config
-)
-
-func tlsDefaultConfig() *tls.Config {
-	return &tlsEmptyConfig
-}
-
-func hostport(host, defaultPort string) (hostname, addr string) {
-	var (
-		colon   = strings.LastIndexByte(host, ':')
-		bracket = strings.IndexByte(host, ']')
-	)
-	if colon > bracket {
-		return host[:colon], host
-	}
-	return host, host + defaultPort
-}
-
-func (d Dialer) dial(ctx context.Context, u *url.URL) (conn net.Conn, err error) {
-	dial := d.NetDial
-	if dial == nil {
-		dial = netEmptyDialer.DialContext
-	}
-	switch u.Scheme {
-	case "ws":
-		_, addr := hostport(u.Host, ":80")
-		conn, err = dial(ctx, "tcp", addr)
-	case "wss":
-		hostname, addr := hostport(u.Host, ":443")
-		conn, err = dial(ctx, "tcp", addr)
-		if err != nil {
-			return nil, err
-		}
-		tlsClient := d.TLSClient
-		if tlsClient == nil {
-			tlsClient = d.tlsClient
-		}
-		conn = tlsClient(conn, hostname)
-	default:
-		return nil, fmt.Errorf("unexpected websocket scheme: %q", u.Scheme)
-	}
-	if wrap := d.WrapConn; wrap != nil {
-		conn = wrap(conn)
-	}
-	return conn, err
-}
-
-func (d Dialer) tlsClient(conn net.Conn, hostname string) net.Conn {
-	config := d.TLSConfig
-	if config == nil {
-		config = tlsDefaultConfig()
-	}
-	if config.ServerName == "" {
-		config = tlsCloneConfig(config)
-		config.ServerName = hostname
-	}
-	// Do not make conn.Handshake() here because downstairs we will prepare
-	// i/o on this conn with proper context's timeout handling.
-	return tls.Client(conn, config)
-}
-
-var (
-	// This variables are set like in net/net.go.
-	// noDeadline is just zero value for readability.
-	noDeadline = time.Time{}
-	// aLongTimeAgo is a non-zero time, far in the past, used for immediate
-	// cancelation of dials.
-	aLongTimeAgo = time.Unix(42, 0)
-)
-
-// Upgrade writes an upgrade request to the given io.ReadWriter conn at given
-// url u and reads a response from it.
-//
-// It is a caller responsibility to manage I/O deadlines on conn.
-//
-// It returns handshake info and some bytes which could be written by the peer
-// right after response and be caught by us during buffered read.
-func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Handshake, err error) {
-	// headerSeen constants helps to report whether or not some header was seen
-	// during reading request bytes.
-	const (
-		headerSeenUpgrade = 1 << iota
-		headerSeenConnection
-		headerSeenSecAccept
-
-		// headerSeenAll is the value that we expect to receive at the end of
-		// headers read/parse loop.
-		headerSeenAll = 0 |
-			headerSeenUpgrade |
-			headerSeenConnection |
-			headerSeenSecAccept
-	)
-
-	br = pbufio.GetReader(conn,
-		nonZero(d.ReadBufferSize, DefaultClientReadBufferSize),
-	)
-	bw := pbufio.GetWriter(conn,
-		nonZero(d.WriteBufferSize, DefaultClientWriteBufferSize),
-	)
-	defer func() {
-		pbufio.PutWriter(bw)
-		if br.Buffered() == 0 || err != nil {
-			// Server does not wrote additional bytes to the connection or
-			// error occurred. That is, no reason to return buffer.
-			pbufio.PutReader(br)
-			br = nil
-		}
-	}()
-
-	nonce := make([]byte, nonceSize)
-	initNonce(nonce)
-
-	httpWriteUpgradeRequest(bw, u, nonce, d.Protocols, d.Extensions, d.Header, d.Host)
-	if err := bw.Flush(); err != nil {
-		return br, hs, err
-	}
-
-	// Read HTTP status line like "HTTP/1.1 101 Switching Protocols".
-	sl, err := readLine(br)
-	if err != nil {
-		return br, hs, err
-	}
-	// Begin validation of the response.
-	// See https://tools.ietf.org/html/rfc6455#section-4.2.2
-	// Parse request line data like HTTP version, uri and method.
-	resp, err := httpParseResponseLine(sl)
-	if err != nil {
-		return br, hs, err
-	}
-	// Even if RFC says "1.1 or higher" without mentioning the part of the
-	// version, we apply it only to minor part.
-	if resp.major != 1 || resp.minor < 1 {
-		err = ErrHandshakeBadProtocol
-		return br, hs, err
-	}
-	if resp.status != http.StatusSwitchingProtocols {
-		err = StatusError(resp.status)
-		if onStatusError := d.OnStatusError; onStatusError != nil {
-			// Invoke callback with multireader of status-line bytes br.
-			onStatusError(resp.status, resp.reason,
-				io.MultiReader(
-					bytes.NewReader(sl),
-					strings.NewReader(crlf),
-					br,
-				),
-			)
-		}
-		return br, hs, err
-	}
-	// If response status is 101 then we expect all technical headers to be
-	// valid. If not, then we stop processing response without giving user
-	// ability to read non-technical headers. That is, we do not distinguish
-	// technical errors (such as parsing error) and protocol errors.
-	var headerSeen byte
-	for {
-		line, e := readLine(br)
-		if e != nil {
-			err = e
-			return br, hs, err
-		}
-		if len(line) == 0 {
-			// Blank line, no more lines to read.
-			break
-		}
-
-		k, v, ok := httpParseHeaderLine(line)
-		if !ok {
-			err = ErrMalformedResponse
-			return br, hs, err
-		}
-
-		switch btsToString(k) {
-		case headerUpgradeCanonical:
-			headerSeen |= headerSeenUpgrade
-			if !bytes.Equal(v, specHeaderValueUpgrade) && !bytes.EqualFold(v, specHeaderValueUpgrade) {
-				err = ErrHandshakeBadUpgrade
-				return br, hs, err
-			}
-
-		case headerConnectionCanonical:
-			headerSeen |= headerSeenConnection
-			// Note that as RFC6455 says:
-			//   > A |Connection| header field with value "Upgrade".
-			// That is, in server side, "Connection" header could contain
-			// multiple token. But in response it must contains exactly one.
-			if !bytes.Equal(v, specHeaderValueConnection) && !bytes.EqualFold(v, specHeaderValueConnection) {
-				err = ErrHandshakeBadConnection
-				return br, hs, err
-			}
-
-		case headerSecAcceptCanonical:
-			headerSeen |= headerSeenSecAccept
-			if !checkAcceptFromNonce(v, nonce) {
-				err = ErrHandshakeBadSecAccept
-				return br, hs, err
-			}
-
-		case headerSecProtocolCanonical:
-			// RFC6455 1.3:
-			//   "The server selects one or none of the acceptable protocols
-			//   and echoes that value in its handshake to indicate that it has
-			//   selected that protocol."
-			for _, want := range d.Protocols {
-				if string(v) == want {
-					hs.Protocol = want
-					break
-				}
-			}
-			if hs.Protocol == "" {
-				// Server echoed subprotocol that is not present in client
-				// requested protocols.
-				err = ErrHandshakeBadSubProtocol
-				return br, hs, err
-			}
-
-		case headerSecExtensionsCanonical:
-			hs.Extensions, err = matchSelectedExtensions(v, d.Extensions, hs.Extensions)
-			if err != nil {
-				return br, hs, err
-			}
-
-		default:
-			if onHeader := d.OnHeader; onHeader != nil {
-				if e := onHeader(k, v); e != nil {
-					err = e
-					return br, hs, err
-				}
-			}
-		}
-	}
-	if err == nil && headerSeen != headerSeenAll {
-		switch {
-		case headerSeen&headerSeenUpgrade == 0:
-			err = ErrHandshakeBadUpgrade
-		case headerSeen&headerSeenConnection == 0:
-			err = ErrHandshakeBadConnection
-		case headerSeen&headerSeenSecAccept == 0:
-			err = ErrHandshakeBadSecAccept
-		default:
-			panic("unknown headers state")
-		}
-	}
-	return br, hs, err
-}
-
-// PutReader returns bufio.Reader instance to the inner reuse pool.
-// It is useful in rare cases, when Dialer.Dial() returns non-nil buffer which
-// contains unprocessed buffered data, that was sent by the server quickly
-// right after handshake.
-func PutReader(br *bufio.Reader) {
-	pbufio.PutReader(br)
-}
-
-// StatusError contains an unexpected status-line code from the server.
-type StatusError int
-
-func (s StatusError) Error() string {
-	return "unexpected HTTP response status: " + strconv.Itoa(int(s))
-}
-
-func isTimeoutError(err error) bool {
-	t, ok := err.(net.Error)
-	return ok && t.Timeout()
-}
-
-func matchSelectedExtensions(selected []byte, wanted, received []httphead.Option) ([]httphead.Option, error) {
-	if len(selected) == 0 {
-		return received, nil
-	}
-	var (
-		index  int
-		option httphead.Option
-		err    error
-	)
-	index = -1
-	match := func() (ok bool) {
-		for _, want := range wanted {
-			// A server accepts one or more extensions by including a
-			// |Sec-WebSocket-Extensions| header field containing one or more
-			// extensions that were requested by the client.
-			//
-			// The interpretation of any extension parameters, and what
-			// constitutes a valid response by a server to a requested set of
-			// parameters by a client, will be defined by each such extension.
-			if bytes.Equal(option.Name, want.Name) {
-				// Check parsed extension to be present in client
-				// requested extensions. We move matched extension
-				// from client list to avoid allocation of httphead.Option.Name,
-				// httphead.Option.Parameters have to be copied from the header
-				want.Parameters, _ = option.Parameters.Copy(make([]byte, option.Parameters.Size()))
-				received = append(received, want)
-				return true
-			}
-		}
-		return false
-	}
-	ok := httphead.ScanOptions(selected, func(i int, name, attr, val []byte) httphead.Control {
-		if i != index {
-			// Met next option.
-			index = i
-			if i != 0 && !match() {
-				// Server returned non-requested extension.
-				err = ErrHandshakeBadExtensions
-				return httphead.ControlBreak
-			}
-			option = httphead.Option{Name: name}
-		}
-		if attr != nil {
-			option.Parameters.Set(attr, val)
-		}
-		return httphead.ControlContinue
-	})
-	if !ok {
-		err = ErrMalformedResponse
-		return received, err
-	}
-	if !match() {
-		return received, ErrHandshakeBadExtensions
-	}
-	return received, err
-}
-
-// setupContextDeadliner is a helper function that starts connection I/O
-// interrupter goroutine.
-//
-// Started goroutine calls SetDeadline() with long time ago value when context
-// become expired to make any I/O operations failed. It returns done function
-// that stops started goroutine and maps error received from conn I/O methods
-// to possible context expiration error.
-//
-// In concern with possible SetDeadline() call inside interrupter goroutine,
-// caller passes pointer to its I/O error (even if it is nil) to done(&err).
-// That is, even if I/O error is nil, context could be already expired and
-// connection "poisoned" by SetDeadline() call. In that case done(&err) will
-// store at *err ctx.Err() result. If err is caused not by timeout, it will
-// leaved untouched.
-func setupContextDeadliner(ctx context.Context, conn net.Conn) (done func(*error)) {
-	var (
-		quit      = make(chan struct{})
-		interrupt = make(chan error, 1)
-	)
-	go func() {
-		select {
-		case <-quit:
-			interrupt <- nil
-		case <-ctx.Done():
-			// Cancel i/o immediately.
-			conn.SetDeadline(aLongTimeAgo)
-			interrupt <- ctx.Err()
-		}
-	}()
-	return func(err *error) {
-		close(quit)
-		// If ctx.Err() is non-nil and the original err is net.Error with
-		// Timeout() == true, then it means that I/O was canceled by us by
-		// SetDeadline(aLongTimeAgo) call, or by somebody else previously
-		// by conn.SetDeadline(x).
-		//
-		// Even on race condition when both deadlines are expired
-		// (SetDeadline() made not by us and context's), we prefer ctx.Err() to
-		// be returned.
-		if ctxErr := <-interrupt; ctxErr != nil && (*err == nil || isTimeoutError(*err)) {
-			*err = ctxErr
-		}
-	}
-}
diff --git a/vendor/github.com/gobwas/ws/dialer_tls_go17.go b/vendor/github.com/gobwas/ws/dialer_tls_go17.go
deleted file mode 100644
index b606e0a..0000000
--- a/vendor/github.com/gobwas/ws/dialer_tls_go17.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// +build !go1.8
-
-package ws
-
-import "crypto/tls"
-
-func tlsCloneConfig(c *tls.Config) *tls.Config {
-	// NOTE: we copying SessionTicketsDisabled and SessionTicketKey here
-	// without calling inner c.initOnceServer somehow because we only could get
-	// here from the ws.Dialer code, which is obviously a client and makes
-	// tls.Client() when it gets new net.Conn.
-	return &tls.Config{
-		Rand:                        c.Rand,
-		Time:                        c.Time,
-		Certificates:                c.Certificates,
-		NameToCertificate:           c.NameToCertificate,
-		GetCertificate:              c.GetCertificate,
-		RootCAs:                     c.RootCAs,
-		NextProtos:                  c.NextProtos,
-		ServerName:                  c.ServerName,
-		ClientAuth:                  c.ClientAuth,
-		ClientCAs:                   c.ClientCAs,
-		InsecureSkipVerify:          c.InsecureSkipVerify,
-		CipherSuites:                c.CipherSuites,
-		PreferServerCipherSuites:    c.PreferServerCipherSuites,
-		SessionTicketsDisabled:      c.SessionTicketsDisabled,
-		SessionTicketKey:            c.SessionTicketKey,
-		ClientSessionCache:          c.ClientSessionCache,
-		MinVersion:                  c.MinVersion,
-		MaxVersion:                  c.MaxVersion,
-		CurvePreferences:            c.CurvePreferences,
-		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
-		Renegotiation:               c.Renegotiation,
-	}
-}
diff --git a/vendor/github.com/gobwas/ws/dialer_tls_go18.go b/vendor/github.com/gobwas/ws/dialer_tls_go18.go
deleted file mode 100644
index 5589ee5..0000000
--- a/vendor/github.com/gobwas/ws/dialer_tls_go18.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build go1.8
-// +build go1.8
-
-package ws
-
-import "crypto/tls"
-
-func tlsCloneConfig(c *tls.Config) *tls.Config {
-	return c.Clone()
-}
diff --git a/vendor/github.com/gobwas/ws/doc.go b/vendor/github.com/gobwas/ws/doc.go
deleted file mode 100644
index 0118ce2..0000000
--- a/vendor/github.com/gobwas/ws/doc.go
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
-Package ws implements a client and server for the WebSocket protocol as
-specified in RFC 6455.
-
-The main purpose of this package is to provide simple low-level API for
-efficient work with protocol.
-
-Overview.
-
-Upgrade to WebSocket (or WebSocket handshake) can be done in two ways.
-
-The first way is to use `net/http` server:
-
-	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
-		conn, _, _, err := ws.UpgradeHTTP(r, w)
-	})
-
-The second and much more efficient way is so-called "zero-copy upgrade". It
-avoids redundant allocations and copying of not used headers or other request
-data. User decides by himself which data should be copied.
-
-	ln, err := net.Listen("tcp", ":8080")
-	if err != nil {
-		// handle error
-	}
-
-	conn, err := ln.Accept()
-	if err != nil {
-		// handle error
-	}
-
-	handshake, err := ws.Upgrade(conn)
-	if err != nil {
-		// handle error
-	}
-
-For customization details see `ws.Upgrader` documentation.
-
-After WebSocket handshake you can work with connection in multiple ways.
-That is, `ws` does not force the only one way of how to work with WebSocket:
-
-	header, err := ws.ReadHeader(conn)
-	if err != nil {
-		// handle err
-	}
-
-	buf := make([]byte, header.Length)
-	_, err := io.ReadFull(conn, buf)
-	if err != nil {
-		// handle err
-	}
-
-	resp := ws.NewBinaryFrame([]byte("hello, world!"))
-	if err := ws.WriteFrame(conn, frame); err != nil {
-	    // handle err
-	}
-
-As you can see, it stream friendly:
-
-	const N = 42
-
-	ws.WriteHeader(ws.Header{
-		Fin:    true,
-		Length: N,
-		OpCode: ws.OpBinary,
-	})
-
-	io.CopyN(conn, rand.Reader, N)
-
-Or:
-
-	header, err := ws.ReadHeader(conn)
-	if err != nil {
-		// handle err
-	}
-
-	io.CopyN(ioutil.Discard, conn, header.Length)
-
-For more info see the documentation.
-*/
-package ws
diff --git a/vendor/github.com/gobwas/ws/errors.go b/vendor/github.com/gobwas/ws/errors.go
deleted file mode 100644
index f5668b2..0000000
--- a/vendor/github.com/gobwas/ws/errors.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package ws
-
-// RejectOption represents an option used to control the way connection is
-// rejected.
-type RejectOption func(*ConnectionRejectedError)
-
-// RejectionReason returns an option that makes connection to be rejected with
-// given reason.
-func RejectionReason(reason string) RejectOption {
-	return func(err *ConnectionRejectedError) {
-		err.reason = reason
-	}
-}
-
-// RejectionStatus returns an option that makes connection to be rejected with
-// given HTTP status code.
-func RejectionStatus(code int) RejectOption {
-	return func(err *ConnectionRejectedError) {
-		err.code = code
-	}
-}
-
-// RejectionHeader returns an option that makes connection to be rejected with
-// given HTTP headers.
-func RejectionHeader(h HandshakeHeader) RejectOption {
-	return func(err *ConnectionRejectedError) {
-		err.header = h
-	}
-}
-
-// RejectConnectionError constructs an error that could be used to control the
-// way handshake is rejected by Upgrader.
-func RejectConnectionError(options ...RejectOption) error {
-	err := new(ConnectionRejectedError)
-	for _, opt := range options {
-		opt(err)
-	}
-	return err
-}
-
-// ConnectionRejectedError represents a rejection of connection during
-// WebSocket handshake error.
-//
-// It can be returned by Upgrader's On* hooks to indicate that WebSocket
-// handshake should be rejected.
-type ConnectionRejectedError struct {
-	reason string
-	code   int
-	header HandshakeHeader
-}
-
-// Error implements error interface.
-func (r *ConnectionRejectedError) Error() string {
-	return r.reason
-}
-
-func (r *ConnectionRejectedError) StatusCode() int {
-	return r.code
-}
diff --git a/vendor/github.com/gobwas/ws/frame.go b/vendor/github.com/gobwas/ws/frame.go
deleted file mode 100644
index ae10144..0000000
--- a/vendor/github.com/gobwas/ws/frame.go
+++ /dev/null
@@ -1,420 +0,0 @@
-package ws
-
-import (
-	"bytes"
-	"encoding/binary"
-	"math/rand"
-)
-
-// Constants defined by specification.
-const (
-	// All control frames MUST have a payload length of 125 bytes or less and MUST NOT be fragmented.
-	MaxControlFramePayloadSize = 125
-)
-
-// OpCode represents operation code.
-type OpCode byte
-
-// Operation codes defined by specification.
-// See https://tools.ietf.org/html/rfc6455#section-5.2
-const (
-	OpContinuation OpCode = 0x0
-	OpText         OpCode = 0x1
-	OpBinary       OpCode = 0x2
-	OpClose        OpCode = 0x8
-	OpPing         OpCode = 0x9
-	OpPong         OpCode = 0xa
-)
-
-// IsControl checks whether the c is control operation code.
-// See https://tools.ietf.org/html/rfc6455#section-5.5
-func (c OpCode) IsControl() bool {
-	// RFC6455: Control frames are identified by opcodes where
-	// the most significant bit of the opcode is 1.
-	//
-	// Note that OpCode is only 4 bit length.
-	return c&0x8 != 0
-}
-
-// IsData checks whether the c is data operation code.
-// See https://tools.ietf.org/html/rfc6455#section-5.6
-func (c OpCode) IsData() bool {
-	// RFC6455: Data frames (e.g., non-control frames) are identified by opcodes
-	// where the most significant bit of the opcode is 0.
-	//
-	// Note that OpCode is only 4 bit length.
-	return c&0x8 == 0
-}
-
-// IsReserved checks whether the c is reserved operation code.
-// See https://tools.ietf.org/html/rfc6455#section-5.2
-func (c OpCode) IsReserved() bool {
-	// RFC6455:
-	// %x3-7 are reserved for further non-control frames
-	// %xB-F are reserved for further control frames
-	return (0x3 <= c && c <= 0x7) || (0xb <= c && c <= 0xf)
-}
-
-// StatusCode represents the encoded reason for closure of websocket connection.
-//
-// There are few helper methods on StatusCode that helps to define a range in
-// which given code is lay in. accordingly to ranges defined in specification.
-//
-// See https://tools.ietf.org/html/rfc6455#section-7.4
-type StatusCode uint16
-
-// StatusCodeRange describes range of StatusCode values.
-type StatusCodeRange struct {
-	Min, Max StatusCode
-}
-
-// Status code ranges defined by specification.
-// See https://tools.ietf.org/html/rfc6455#section-7.4.2
-var (
-	StatusRangeNotInUse    = StatusCodeRange{0, 999}
-	StatusRangeProtocol    = StatusCodeRange{1000, 2999}
-	StatusRangeApplication = StatusCodeRange{3000, 3999}
-	StatusRangePrivate     = StatusCodeRange{4000, 4999}
-)
-
-// Status codes defined by specification.
-// See https://tools.ietf.org/html/rfc6455#section-7.4.1
-const (
-	StatusNormalClosure           StatusCode = 1000
-	StatusGoingAway               StatusCode = 1001
-	StatusProtocolError           StatusCode = 1002
-	StatusUnsupportedData         StatusCode = 1003
-	StatusNoMeaningYet            StatusCode = 1004
-	StatusInvalidFramePayloadData StatusCode = 1007
-	StatusPolicyViolation         StatusCode = 1008
-	StatusMessageTooBig           StatusCode = 1009
-	StatusMandatoryExt            StatusCode = 1010
-	StatusInternalServerError     StatusCode = 1011
-	StatusTLSHandshake            StatusCode = 1015
-
-	// StatusAbnormalClosure is a special code designated for use in
-	// applications.
-	StatusAbnormalClosure StatusCode = 1006
-
-	// StatusNoStatusRcvd is a special code designated for use in applications.
-	StatusNoStatusRcvd StatusCode = 1005
-)
-
-// In reports whether the code is defined in given range.
-func (s StatusCode) In(r StatusCodeRange) bool {
-	return r.Min <= s && s <= r.Max
-}
-
-// Empty reports whether the code is empty.
-// Empty code has no any meaning neither app level codes nor other.
-// This method is useful just to check that code is golang default value 0.
-func (s StatusCode) Empty() bool {
-	return s == 0
-}
-
-// IsNotUsed reports whether the code is predefined in not used range.
-func (s StatusCode) IsNotUsed() bool {
-	return s.In(StatusRangeNotInUse)
-}
-
-// IsApplicationSpec reports whether the code should be defined by
-// application, framework or libraries specification.
-func (s StatusCode) IsApplicationSpec() bool {
-	return s.In(StatusRangeApplication)
-}
-
-// IsPrivateSpec reports whether the code should be defined privately.
-func (s StatusCode) IsPrivateSpec() bool {
-	return s.In(StatusRangePrivate)
-}
-
-// IsProtocolSpec reports whether the code should be defined by protocol specification.
-func (s StatusCode) IsProtocolSpec() bool {
-	return s.In(StatusRangeProtocol)
-}
-
-// IsProtocolDefined reports whether the code is already defined by protocol specification.
-func (s StatusCode) IsProtocolDefined() bool {
-	switch s {
-	case StatusNormalClosure,
-		StatusGoingAway,
-		StatusProtocolError,
-		StatusUnsupportedData,
-		StatusInvalidFramePayloadData,
-		StatusPolicyViolation,
-		StatusMessageTooBig,
-		StatusMandatoryExt,
-		StatusInternalServerError,
-		StatusNoStatusRcvd,
-		StatusAbnormalClosure,
-		StatusTLSHandshake:
-		return true
-	}
-	return false
-}
-
-// IsProtocolReserved reports whether the code is defined by protocol specification
-// to be reserved only for application usage purpose.
-func (s StatusCode) IsProtocolReserved() bool {
-	switch s {
-	// [RFC6455]: {1005,1006,1015} is a reserved value and MUST NOT be set as a status code in a
-	// Close control frame by an endpoint.
-	case StatusNoStatusRcvd, StatusAbnormalClosure, StatusTLSHandshake:
-		return true
-	default:
-		return false
-	}
-}
-
-// Compiled control frames for common use cases.
-// For construct-serialize optimizations.
-var (
-	CompiledPing  = MustCompileFrame(NewPingFrame(nil))
-	CompiledPong  = MustCompileFrame(NewPongFrame(nil))
-	CompiledClose = MustCompileFrame(NewCloseFrame(nil))
-
-	CompiledCloseNormalClosure           = MustCompileFrame(closeFrameNormalClosure)
-	CompiledCloseGoingAway               = MustCompileFrame(closeFrameGoingAway)
-	CompiledCloseProtocolError           = MustCompileFrame(closeFrameProtocolError)
-	CompiledCloseUnsupportedData         = MustCompileFrame(closeFrameUnsupportedData)
-	CompiledCloseNoMeaningYet            = MustCompileFrame(closeFrameNoMeaningYet)
-	CompiledCloseInvalidFramePayloadData = MustCompileFrame(closeFrameInvalidFramePayloadData)
-	CompiledClosePolicyViolation         = MustCompileFrame(closeFramePolicyViolation)
-	CompiledCloseMessageTooBig           = MustCompileFrame(closeFrameMessageTooBig)
-	CompiledCloseMandatoryExt            = MustCompileFrame(closeFrameMandatoryExt)
-	CompiledCloseInternalServerError     = MustCompileFrame(closeFrameInternalServerError)
-	CompiledCloseTLSHandshake            = MustCompileFrame(closeFrameTLSHandshake)
-)
-
-// Header represents websocket frame header.
-// See https://tools.ietf.org/html/rfc6455#section-5.2
-type Header struct {
-	Fin    bool
-	Rsv    byte
-	OpCode OpCode
-	Masked bool
-	Mask   [4]byte
-	Length int64
-}
-
-// Rsv1 reports whether the header has first rsv bit set.
-func (h Header) Rsv1() bool { return h.Rsv&bit5 != 0 }
-
-// Rsv2 reports whether the header has second rsv bit set.
-func (h Header) Rsv2() bool { return h.Rsv&bit6 != 0 }
-
-// Rsv3 reports whether the header has third rsv bit set.
-func (h Header) Rsv3() bool { return h.Rsv&bit7 != 0 }
-
-// Rsv creates rsv byte representation from bits.
-func Rsv(r1, r2, r3 bool) (rsv byte) {
-	if r1 {
-		rsv |= bit5
-	}
-	if r2 {
-		rsv |= bit6
-	}
-	if r3 {
-		rsv |= bit7
-	}
-	return rsv
-}
-
-// RsvBits returns rsv bits from bytes representation.
-func RsvBits(rsv byte) (r1, r2, r3 bool) {
-	r1 = rsv&bit5 != 0
-	r2 = rsv&bit6 != 0
-	r3 = rsv&bit7 != 0
-	return r1, r2, r3
-}
-
-// Frame represents websocket frame.
-// See https://tools.ietf.org/html/rfc6455#section-5.2
-type Frame struct {
-	Header  Header
-	Payload []byte
-}
-
-// NewFrame creates frame with given operation code,
-// flag of completeness and payload bytes.
-func NewFrame(op OpCode, fin bool, p []byte) Frame {
-	return Frame{
-		Header: Header{
-			Fin:    fin,
-			OpCode: op,
-			Length: int64(len(p)),
-		},
-		Payload: p,
-	}
-}
-
-// NewTextFrame creates text frame with p as payload.
-// Note that p is not copied.
-func NewTextFrame(p []byte) Frame {
-	return NewFrame(OpText, true, p)
-}
-
-// NewBinaryFrame creates binary frame with p as payload.
-// Note that p is not copied.
-func NewBinaryFrame(p []byte) Frame {
-	return NewFrame(OpBinary, true, p)
-}
-
-// NewPingFrame creates ping frame with p as payload.
-// Note that p is not copied.
-// Note that p must have length of MaxControlFramePayloadSize bytes or less due
-// to RFC.
-func NewPingFrame(p []byte) Frame {
-	return NewFrame(OpPing, true, p)
-}
-
-// NewPongFrame creates pong frame with p as payload.
-// Note that p is not copied.
-// Note that p must have length of MaxControlFramePayloadSize bytes or less due
-// to RFC.
-func NewPongFrame(p []byte) Frame {
-	return NewFrame(OpPong, true, p)
-}
-
-// NewCloseFrame creates close frame with given close body.
-// Note that p is not copied.
-// Note that p must have length of MaxControlFramePayloadSize bytes or less due
-// to RFC.
-func NewCloseFrame(p []byte) Frame {
-	return NewFrame(OpClose, true, p)
-}
-
-// NewCloseFrameBody encodes a closure code and a reason into a binary
-// representation.
-//
-// It returns slice which is at most MaxControlFramePayloadSize bytes length.
-// If the reason is too big it will be cropped to fit the limit defined by the
-// spec.
-//
-// See https://tools.ietf.org/html/rfc6455#section-5.5
-func NewCloseFrameBody(code StatusCode, reason string) []byte {
-	n := min(2+len(reason), MaxControlFramePayloadSize)
-	p := make([]byte, n)
-
-	crop := min(MaxControlFramePayloadSize-2, len(reason))
-	PutCloseFrameBody(p, code, reason[:crop])
-
-	return p
-}
-
-// PutCloseFrameBody encodes code and reason into buf.
-//
-// It will panic if the buffer is too small to accommodate a code or a reason.
-//
-// PutCloseFrameBody does not check buffer to be RFC compliant, but note that
-// by RFC it must be at most MaxControlFramePayloadSize.
-func PutCloseFrameBody(p []byte, code StatusCode, reason string) {
-	_ = p[1+len(reason)]
-	binary.BigEndian.PutUint16(p, uint16(code))
-	copy(p[2:], reason)
-}
-
-// MaskFrame masks frame and returns frame with masked payload and Mask header's field set.
-// Note that it copies f payload to prevent collisions.
-// For less allocations you could use MaskFrameInPlace or construct frame manually.
-func MaskFrame(f Frame) Frame {
-	return MaskFrameWith(f, NewMask())
-}
-
-// MaskFrameWith masks frame with given mask and returns frame
-// with masked payload and Mask header's field set.
-// Note that it copies f payload to prevent collisions.
-// For less allocations you could use MaskFrameInPlaceWith or construct frame manually.
-func MaskFrameWith(f Frame, mask [4]byte) Frame {
-	// TODO(gobwas): check CopyCipher ws copy() Cipher().
-	p := make([]byte, len(f.Payload))
-	copy(p, f.Payload)
-	f.Payload = p
-	return MaskFrameInPlaceWith(f, mask)
-}
-
-// MaskFrameInPlace masks frame and returns frame with masked payload and Mask
-// header's field set.
-// Note that it applies xor cipher to f.Payload without copying, that is, it
-// modifies f.Payload inplace.
-func MaskFrameInPlace(f Frame) Frame {
-	return MaskFrameInPlaceWith(f, NewMask())
-}
-
-var zeroMask [4]byte
-
-// UnmaskFrame unmasks frame and returns frame with unmasked payload and Mask
-// header's field cleared.
-// Note that it copies f payload.
-func UnmaskFrame(f Frame) Frame {
-	p := make([]byte, len(f.Payload))
-	copy(p, f.Payload)
-	f.Payload = p
-	return UnmaskFrameInPlace(f)
-}
-
-// UnmaskFrameInPlace unmasks frame and returns frame with unmasked payload and
-// Mask header's field cleared.
-// Note that it applies xor cipher to f.Payload without copying, that is, it
-// modifies f.Payload inplace.
-func UnmaskFrameInPlace(f Frame) Frame {
-	Cipher(f.Payload, f.Header.Mask, 0)
-	f.Header.Masked = false
-	f.Header.Mask = zeroMask
-	return f
-}
-
-// MaskFrameInPlaceWith masks frame with given mask and returns frame
-// with masked payload and Mask header's field set.
-// Note that it applies xor cipher to f.Payload without copying, that is, it
-// modifies f.Payload inplace.
-func MaskFrameInPlaceWith(f Frame, m [4]byte) Frame {
-	f.Header.Masked = true
-	f.Header.Mask = m
-	Cipher(f.Payload, m, 0)
-	return f
-}
-
-// NewMask creates new random mask.
-func NewMask() (ret [4]byte) {
-	binary.BigEndian.PutUint32(ret[:], rand.Uint32())
-	return ret
-}
-
-// CompileFrame returns byte representation of given frame.
-// In terms of memory consumption it is useful to precompile static frames
-// which are often used.
-func CompileFrame(f Frame) (bts []byte, err error) {
-	buf := bytes.NewBuffer(make([]byte, 0, 16))
-	err = WriteFrame(buf, f)
-	bts = buf.Bytes()
-	return bts, err
-}
-
-// MustCompileFrame is like CompileFrame but panics if frame can not be
-// encoded.
-func MustCompileFrame(f Frame) []byte {
-	bts, err := CompileFrame(f)
-	if err != nil {
-		panic(err)
-	}
-	return bts
-}
-
-func makeCloseFrame(code StatusCode) Frame {
-	return NewCloseFrame(NewCloseFrameBody(code, ""))
-}
-
-var (
-	closeFrameNormalClosure           = makeCloseFrame(StatusNormalClosure)
-	closeFrameGoingAway               = makeCloseFrame(StatusGoingAway)
-	closeFrameProtocolError           = makeCloseFrame(StatusProtocolError)
-	closeFrameUnsupportedData         = makeCloseFrame(StatusUnsupportedData)
-	closeFrameNoMeaningYet            = makeCloseFrame(StatusNoMeaningYet)
-	closeFrameInvalidFramePayloadData = makeCloseFrame(StatusInvalidFramePayloadData)
-	closeFramePolicyViolation         = makeCloseFrame(StatusPolicyViolation)
-	closeFrameMessageTooBig           = makeCloseFrame(StatusMessageTooBig)
-	closeFrameMandatoryExt            = makeCloseFrame(StatusMandatoryExt)
-	closeFrameInternalServerError     = makeCloseFrame(StatusInternalServerError)
-	closeFrameTLSHandshake            = makeCloseFrame(StatusTLSHandshake)
-)
diff --git a/vendor/github.com/gobwas/ws/hijack_go119.go b/vendor/github.com/gobwas/ws/hijack_go119.go
deleted file mode 100644
index 6ac556c..0000000
--- a/vendor/github.com/gobwas/ws/hijack_go119.go
+++ /dev/null
@@ -1,18 +0,0 @@
-//go:build !go1.20
-// +build !go1.20
-
-package ws
-
-import (
-	"bufio"
-	"net"
-	"net/http"
-)
-
-func hijack(w http.ResponseWriter) (net.Conn, *bufio.ReadWriter, error) {
-	hj, ok := w.(http.Hijacker)
-	if ok {
-		return hj.Hijack()
-	}
-	return nil, nil, ErrNotHijacker
-}
diff --git a/vendor/github.com/gobwas/ws/hijack_go120.go b/vendor/github.com/gobwas/ws/hijack_go120.go
deleted file mode 100644
index e67b439..0000000
--- a/vendor/github.com/gobwas/ws/hijack_go120.go
+++ /dev/null
@@ -1,19 +0,0 @@
-//go:build go1.20
-// +build go1.20
-
-package ws
-
-import (
-	"bufio"
-	"errors"
-	"net"
-	"net/http"
-)
-
-func hijack(w http.ResponseWriter) (net.Conn, *bufio.ReadWriter, error) {
-	conn, rw, err := http.NewResponseController(w).Hijack()
-	if errors.Is(err, http.ErrNotSupported) {
-		return nil, nil, ErrNotHijacker
-	}
-	return conn, rw, err
-}
diff --git a/vendor/github.com/gobwas/ws/http.go b/vendor/github.com/gobwas/ws/http.go
deleted file mode 100644
index a3a682d..0000000
--- a/vendor/github.com/gobwas/ws/http.go
+++ /dev/null
@@ -1,507 +0,0 @@
-package ws
-
-import (
-	"bufio"
-	"bytes"
-	"io"
-	"net/http"
-	"net/url"
-	"strconv"
-
-	"github.com/gobwas/httphead"
-)
-
-const (
-	crlf          = "\r\n"
-	colonAndSpace = ": "
-	commaAndSpace = ", "
-)
-
-const (
-	textHeadUpgrade = "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\n"
-)
-
-var (
-	textHeadBadRequest          = statusText(http.StatusBadRequest)
-	textHeadInternalServerError = statusText(http.StatusInternalServerError)
-	textHeadUpgradeRequired     = statusText(http.StatusUpgradeRequired)
-
-	textTailErrHandshakeBadProtocol   = errorText(ErrHandshakeBadProtocol)
-	textTailErrHandshakeBadMethod     = errorText(ErrHandshakeBadMethod)
-	textTailErrHandshakeBadHost       = errorText(ErrHandshakeBadHost)
-	textTailErrHandshakeBadUpgrade    = errorText(ErrHandshakeBadUpgrade)
-	textTailErrHandshakeBadConnection = errorText(ErrHandshakeBadConnection)
-	textTailErrHandshakeBadSecAccept  = errorText(ErrHandshakeBadSecAccept)
-	textTailErrHandshakeBadSecKey     = errorText(ErrHandshakeBadSecKey)
-	textTailErrHandshakeBadSecVersion = errorText(ErrHandshakeBadSecVersion)
-	textTailErrUpgradeRequired        = errorText(ErrHandshakeUpgradeRequired)
-)
-
-const (
-	// Every new header must be added to TestHeaderNames test.
-	headerHost          = "Host"
-	headerUpgrade       = "Upgrade"
-	headerConnection    = "Connection"
-	headerSecVersion    = "Sec-WebSocket-Version"
-	headerSecProtocol   = "Sec-WebSocket-Protocol"
-	headerSecExtensions = "Sec-WebSocket-Extensions"
-	headerSecKey        = "Sec-WebSocket-Key"
-	headerSecAccept     = "Sec-WebSocket-Accept"
-
-	headerHostCanonical          = headerHost
-	headerUpgradeCanonical       = headerUpgrade
-	headerConnectionCanonical    = headerConnection
-	headerSecVersionCanonical    = "Sec-Websocket-Version"
-	headerSecProtocolCanonical   = "Sec-Websocket-Protocol"
-	headerSecExtensionsCanonical = "Sec-Websocket-Extensions"
-	headerSecKeyCanonical        = "Sec-Websocket-Key"
-	headerSecAcceptCanonical     = "Sec-Websocket-Accept"
-)
-
-var (
-	specHeaderValueUpgrade         = []byte("websocket")
-	specHeaderValueConnection      = []byte("Upgrade")
-	specHeaderValueConnectionLower = []byte("upgrade")
-	specHeaderValueSecVersion      = []byte("13")
-)
-
-var (
-	httpVersion1_0    = []byte("HTTP/1.0")
-	httpVersion1_1    = []byte("HTTP/1.1")
-	httpVersionPrefix = []byte("HTTP/")
-)
-
-type httpRequestLine struct {
-	method, uri  []byte
-	major, minor int
-}
-
-type httpResponseLine struct {
-	major, minor int
-	status       int
-	reason       []byte
-}
-
-// httpParseRequestLine parses http request line like "GET / HTTP/1.0".
-func httpParseRequestLine(line []byte) (req httpRequestLine, err error) {
-	var proto []byte
-	req.method, req.uri, proto = bsplit3(line, ' ')
-
-	var ok bool
-	req.major, req.minor, ok = httpParseVersion(proto)
-	if !ok {
-		err = ErrMalformedRequest
-	}
-	return req, err
-}
-
-func httpParseResponseLine(line []byte) (resp httpResponseLine, err error) {
-	var (
-		proto  []byte
-		status []byte
-	)
-	proto, status, resp.reason = bsplit3(line, ' ')
-
-	var ok bool
-	resp.major, resp.minor, ok = httpParseVersion(proto)
-	if !ok {
-		return resp, ErrMalformedResponse
-	}
-
-	var convErr error
-	resp.status, convErr = asciiToInt(status)
-	if convErr != nil {
-		return resp, ErrMalformedResponse
-	}
-
-	return resp, nil
-}
-
-// httpParseVersion parses major and minor version of HTTP protocol. It returns
-// parsed values and true if parse is ok.
-func httpParseVersion(bts []byte) (major, minor int, ok bool) {
-	switch {
-	case bytes.Equal(bts, httpVersion1_0):
-		return 1, 0, true
-	case bytes.Equal(bts, httpVersion1_1):
-		return 1, 1, true
-	case len(bts) < 8:
-		return 0, 0, false
-	case !bytes.Equal(bts[:5], httpVersionPrefix):
-		return 0, 0, false
-	}
-
-	bts = bts[5:]
-
-	dot := bytes.IndexByte(bts, '.')
-	if dot == -1 {
-		return 0, 0, false
-	}
-	var err error
-	major, err = asciiToInt(bts[:dot])
-	if err != nil {
-		return major, 0, false
-	}
-	minor, err = asciiToInt(bts[dot+1:])
-	if err != nil {
-		return major, minor, false
-	}
-
-	return major, minor, true
-}
-
-// httpParseHeaderLine parses HTTP header as key-value pair. It returns parsed
-// values and true if parse is ok.
-func httpParseHeaderLine(line []byte) (k, v []byte, ok bool) {
-	colon := bytes.IndexByte(line, ':')
-	if colon == -1 {
-		return nil, nil, false
-	}
-
-	k = btrim(line[:colon])
-	// TODO(gobwas): maybe use just lower here?
-	canonicalizeHeaderKey(k)
-
-	v = btrim(line[colon+1:])
-
-	return k, v, true
-}
-
-// httpGetHeader is the same as textproto.MIMEHeader.Get, except the thing,
-// that key is already canonical. This helps to increase performance.
-func httpGetHeader(h http.Header, key string) string {
-	if h == nil {
-		return ""
-	}
-	v := h[key]
-	if len(v) == 0 {
-		return ""
-	}
-	return v[0]
-}
-
-// The request MAY include a header field with the name
-// |Sec-WebSocket-Protocol|.  If present, this value indicates one or more
-// comma-separated subprotocol the client wishes to speak, ordered by
-// preference.  The elements that comprise this value MUST be non-empty strings
-// with characters in the range U+0021 to U+007E not including separator
-// characters as defined in [RFC2616] and MUST all be unique strings.  The ABNF
-// for the value of this header field is 1#token, where the definitions of
-// constructs and rules are as given in [RFC2616].
-func strSelectProtocol(h string, check func(string) bool) (ret string, ok bool) {
-	ok = httphead.ScanTokens(strToBytes(h), func(v []byte) bool {
-		if check(btsToString(v)) {
-			ret = string(v)
-			return false
-		}
-		return true
-	})
-	return ret, ok
-}
-
-func btsSelectProtocol(h []byte, check func([]byte) bool) (ret string, ok bool) {
-	var selected []byte
-	ok = httphead.ScanTokens(h, func(v []byte) bool {
-		if check(v) {
-			selected = v
-			return false
-		}
-		return true
-	})
-	if ok && selected != nil {
-		return string(selected), true
-	}
-	return ret, ok
-}
-
-func btsSelectExtensions(h []byte, selected []httphead.Option, check func(httphead.Option) bool) ([]httphead.Option, bool) {
-	s := httphead.OptionSelector{
-		Flags: httphead.SelectCopy,
-		Check: check,
-	}
-	return s.Select(h, selected)
-}
-
-func negotiateMaybe(in httphead.Option, dest []httphead.Option, f func(httphead.Option) (httphead.Option, error)) ([]httphead.Option, error) {
-	if in.Size() == 0 {
-		return dest, nil
-	}
-	opt, err := f(in)
-	if err != nil {
-		return nil, err
-	}
-	if opt.Size() > 0 {
-		dest = append(dest, opt)
-	}
-	return dest, nil
-}
-
-func negotiateExtensions(
-	h []byte, dest []httphead.Option,
-	f func(httphead.Option) (httphead.Option, error),
-) (_ []httphead.Option, err error) {
-	index := -1
-	var current httphead.Option
-	ok := httphead.ScanOptions(h, func(i int, name, attr, val []byte) httphead.Control {
-		if i != index {
-			dest, err = negotiateMaybe(current, dest, f)
-			if err != nil {
-				return httphead.ControlBreak
-			}
-			index = i
-			current = httphead.Option{Name: name}
-		}
-		if attr != nil {
-			current.Parameters.Set(attr, val)
-		}
-		return httphead.ControlContinue
-	})
-	if !ok {
-		return nil, ErrMalformedRequest
-	}
-	return negotiateMaybe(current, dest, f)
-}
-
-func httpWriteHeader(bw *bufio.Writer, key, value string) {
-	httpWriteHeaderKey(bw, key)
-	bw.WriteString(value)
-	bw.WriteString(crlf)
-}
-
-func httpWriteHeaderBts(bw *bufio.Writer, key string, value []byte) {
-	httpWriteHeaderKey(bw, key)
-	bw.Write(value)
-	bw.WriteString(crlf)
-}
-
-func httpWriteHeaderKey(bw *bufio.Writer, key string) {
-	bw.WriteString(key)
-	bw.WriteString(colonAndSpace)
-}
-
-func httpWriteUpgradeRequest(
-	bw *bufio.Writer,
-	u *url.URL,
-	nonce []byte,
-	protocols []string,
-	extensions []httphead.Option,
-	header HandshakeHeader,
-	host string,
-) {
-	bw.WriteString("GET ")
-	bw.WriteString(u.RequestURI())
-	bw.WriteString(" HTTP/1.1\r\n")
-
-	if host == "" {
-		host = u.Host
-	}
-	httpWriteHeader(bw, headerHost, host)
-
-	httpWriteHeaderBts(bw, headerUpgrade, specHeaderValueUpgrade)
-	httpWriteHeaderBts(bw, headerConnection, specHeaderValueConnection)
-	httpWriteHeaderBts(bw, headerSecVersion, specHeaderValueSecVersion)
-
-	// NOTE: write nonce bytes as a string to prevent heap allocation –
-	// WriteString() copy given string into its inner buffer, unlike Write()
-	// which may write p directly to the underlying io.Writer – which in turn
-	// will lead to p escape.
-	httpWriteHeader(bw, headerSecKey, btsToString(nonce))
-
-	if len(protocols) > 0 {
-		httpWriteHeaderKey(bw, headerSecProtocol)
-		for i, p := range protocols {
-			if i > 0 {
-				bw.WriteString(commaAndSpace)
-			}
-			bw.WriteString(p)
-		}
-		bw.WriteString(crlf)
-	}
-
-	if len(extensions) > 0 {
-		httpWriteHeaderKey(bw, headerSecExtensions)
-		httphead.WriteOptions(bw, extensions)
-		bw.WriteString(crlf)
-	}
-
-	if header != nil {
-		header.WriteTo(bw)
-	}
-
-	bw.WriteString(crlf)
-}
-
-func httpWriteResponseUpgrade(bw *bufio.Writer, nonce []byte, hs Handshake, header HandshakeHeaderFunc) {
-	bw.WriteString(textHeadUpgrade)
-
-	httpWriteHeaderKey(bw, headerSecAccept)
-	writeAccept(bw, nonce)
-	bw.WriteString(crlf)
-
-	if hs.Protocol != "" {
-		httpWriteHeader(bw, headerSecProtocol, hs.Protocol)
-	}
-	if len(hs.Extensions) > 0 {
-		httpWriteHeaderKey(bw, headerSecExtensions)
-		httphead.WriteOptions(bw, hs.Extensions)
-		bw.WriteString(crlf)
-	}
-	if header != nil {
-		header(bw)
-	}
-
-	bw.WriteString(crlf)
-}
-
-func httpWriteResponseError(bw *bufio.Writer, err error, code int, header HandshakeHeaderFunc) {
-	switch code {
-	case http.StatusBadRequest:
-		bw.WriteString(textHeadBadRequest)
-	case http.StatusInternalServerError:
-		bw.WriteString(textHeadInternalServerError)
-	case http.StatusUpgradeRequired:
-		bw.WriteString(textHeadUpgradeRequired)
-	default:
-		writeStatusText(bw, code)
-	}
-
-	// Write custom headers.
-	if header != nil {
-		header(bw)
-	}
-
-	switch err {
-	case ErrHandshakeBadProtocol:
-		bw.WriteString(textTailErrHandshakeBadProtocol)
-	case ErrHandshakeBadMethod:
-		bw.WriteString(textTailErrHandshakeBadMethod)
-	case ErrHandshakeBadHost:
-		bw.WriteString(textTailErrHandshakeBadHost)
-	case ErrHandshakeBadUpgrade:
-		bw.WriteString(textTailErrHandshakeBadUpgrade)
-	case ErrHandshakeBadConnection:
-		bw.WriteString(textTailErrHandshakeBadConnection)
-	case ErrHandshakeBadSecAccept:
-		bw.WriteString(textTailErrHandshakeBadSecAccept)
-	case ErrHandshakeBadSecKey:
-		bw.WriteString(textTailErrHandshakeBadSecKey)
-	case ErrHandshakeBadSecVersion:
-		bw.WriteString(textTailErrHandshakeBadSecVersion)
-	case ErrHandshakeUpgradeRequired:
-		bw.WriteString(textTailErrUpgradeRequired)
-	case nil:
-		bw.WriteString(crlf)
-	default:
-		writeErrorText(bw, err)
-	}
-}
-
-func writeStatusText(bw *bufio.Writer, code int) {
-	bw.WriteString("HTTP/1.1 ")
-	bw.WriteString(strconv.Itoa(code))
-	bw.WriteByte(' ')
-	bw.WriteString(http.StatusText(code))
-	bw.WriteString(crlf)
-	bw.WriteString("Content-Type: text/plain; charset=utf-8")
-	bw.WriteString(crlf)
-}
-
-func writeErrorText(bw *bufio.Writer, err error) {
-	body := err.Error()
-	bw.WriteString("Content-Length: ")
-	bw.WriteString(strconv.Itoa(len(body)))
-	bw.WriteString(crlf)
-	bw.WriteString(crlf)
-	bw.WriteString(body)
-}
-
-// httpError is like the http.Error with WebSocket context exception.
-func httpError(w http.ResponseWriter, body string, code int) {
-	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
-	w.Header().Set("Content-Length", strconv.Itoa(len(body)))
-	w.WriteHeader(code)
-	w.Write([]byte(body))
-}
-
-// statusText is a non-performant status text generator.
-// NOTE: Used only to generate constants.
-func statusText(code int) string {
-	var buf bytes.Buffer
-	bw := bufio.NewWriter(&buf)
-	writeStatusText(bw, code)
-	bw.Flush()
-	return buf.String()
-}
-
-// errorText is a non-performant error text generator.
-// NOTE: Used only to generate constants.
-func errorText(err error) string {
-	var buf bytes.Buffer
-	bw := bufio.NewWriter(&buf)
-	writeErrorText(bw, err)
-	bw.Flush()
-	return buf.String()
-}
-
-// HandshakeHeader is the interface that writes both upgrade request or
-// response headers into a given io.Writer.
-type HandshakeHeader interface {
-	io.WriterTo
-}
-
-// HandshakeHeaderString is an adapter to allow the use of headers represented
-// by ordinary string as HandshakeHeader.
-type HandshakeHeaderString string
-
-// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
-func (s HandshakeHeaderString) WriteTo(w io.Writer) (int64, error) {
-	n, err := io.WriteString(w, string(s))
-	return int64(n), err
-}
-
-// HandshakeHeaderBytes is an adapter to allow the use of headers represented
-// by ordinary slice of bytes as HandshakeHeader.
-type HandshakeHeaderBytes []byte
-
-// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
-func (b HandshakeHeaderBytes) WriteTo(w io.Writer) (int64, error) {
-	n, err := w.Write(b)
-	return int64(n), err
-}
-
-// HandshakeHeaderFunc is an adapter to allow the use of headers represented by
-// ordinary function as HandshakeHeader.
-type HandshakeHeaderFunc func(io.Writer) (int64, error)
-
-// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
-func (f HandshakeHeaderFunc) WriteTo(w io.Writer) (int64, error) {
-	return f(w)
-}
-
-// HandshakeHeaderHTTP is an adapter to allow the use of http.Header as
-// HandshakeHeader.
-type HandshakeHeaderHTTP http.Header
-
-// WriteTo implements HandshakeHeader (and io.WriterTo) interface.
-func (h HandshakeHeaderHTTP) WriteTo(w io.Writer) (int64, error) {
-	wr := writer{w: w}
-	err := http.Header(h).Write(&wr)
-	return wr.n, err
-}
-
-type writer struct {
-	n int64
-	w io.Writer
-}
-
-func (w *writer) WriteString(s string) (int, error) {
-	n, err := io.WriteString(w.w, s)
-	w.n += int64(n)
-	return n, err
-}
-
-func (w *writer) Write(p []byte) (int, error) {
-	n, err := w.w.Write(p)
-	w.n += int64(n)
-	return n, err
-}
diff --git a/vendor/github.com/gobwas/ws/nonce.go b/vendor/github.com/gobwas/ws/nonce.go
deleted file mode 100644
index 7b0edd9..0000000
--- a/vendor/github.com/gobwas/ws/nonce.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package ws
-
-import (
-	"bufio"
-	"bytes"
-	"crypto/sha1"
-	"encoding/base64"
-	"fmt"
-	"math/rand"
-)
-
-const (
-	// RFC6455: The value of this header field MUST be a nonce consisting of a
-	// randomly selected 16-byte value that has been base64-encoded (see
-	// Section 4 of [RFC4648]).  The nonce MUST be selected randomly for each
-	// connection.
-	nonceKeySize = 16
-	nonceSize    = 24 // base64.StdEncoding.EncodedLen(nonceKeySize)
-
-	// RFC6455: The value of this header field is constructed by concatenating
-	// /key/, defined above in step 4 in Section 4.2.2, with the string
-	// "258EAFA5- E914-47DA-95CA-C5AB0DC85B11", taking the SHA-1 hash of this
-	// concatenated value to obtain a 20-byte value and base64- encoding (see
-	// Section 4 of [RFC4648]) this 20-byte hash.
-	acceptSize = 28 // base64.StdEncoding.EncodedLen(sha1.Size)
-)
-
-// initNonce fills given slice with random base64-encoded nonce bytes.
-func initNonce(dst []byte) {
-	// NOTE: bts does not escape.
-	bts := make([]byte, nonceKeySize)
-	if _, err := rand.Read(bts); err != nil {
-		panic(fmt.Sprintf("rand read error: %s", err))
-	}
-	base64.StdEncoding.Encode(dst, bts)
-}
-
-// checkAcceptFromNonce reports whether given accept bytes are valid for given
-// nonce bytes.
-func checkAcceptFromNonce(accept, nonce []byte) bool {
-	if len(accept) != acceptSize {
-		return false
-	}
-	// NOTE: expect does not escape.
-	expect := make([]byte, acceptSize)
-	initAcceptFromNonce(expect, nonce)
-	return bytes.Equal(expect, accept)
-}
-
-// initAcceptFromNonce fills given slice with accept bytes generated from given
-// nonce bytes. Given buffer should be exactly acceptSize bytes.
-func initAcceptFromNonce(accept, nonce []byte) {
-	const magic = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"
-
-	if len(accept) != acceptSize {
-		panic("accept buffer is invalid")
-	}
-	if len(nonce) != nonceSize {
-		panic("nonce is invalid")
-	}
-
-	p := make([]byte, nonceSize+len(magic))
-	copy(p[:nonceSize], nonce)
-	copy(p[nonceSize:], magic)
-
-	sum := sha1.Sum(p)
-	base64.StdEncoding.Encode(accept, sum[:])
-}
-
-func writeAccept(bw *bufio.Writer, nonce []byte) (int, error) {
-	accept := make([]byte, acceptSize)
-	initAcceptFromNonce(accept, nonce)
-	// NOTE: write accept bytes as a string to prevent heap allocation –
-	// WriteString() copy given string into its inner buffer, unlike Write()
-	// which may write p directly to the underlying io.Writer – which in turn
-	// will lead to p escape.
-	return bw.WriteString(btsToString(accept))
-}
diff --git a/vendor/github.com/gobwas/ws/read.go b/vendor/github.com/gobwas/ws/read.go
deleted file mode 100644
index 1771816..0000000
--- a/vendor/github.com/gobwas/ws/read.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package ws
-
-import (
-	"encoding/binary"
-	"fmt"
-	"io"
-)
-
-// Errors used by frame reader.
-var (
-	ErrHeaderLengthMSB        = fmt.Errorf("header error: the most significant bit must be 0")
-	ErrHeaderLengthUnexpected = fmt.Errorf("header error: unexpected payload length bits")
-)
-
-// ReadHeader reads a frame header from r.
-func ReadHeader(r io.Reader) (h Header, err error) {
-	// Make slice of bytes with capacity 12 that could hold any header.
-	//
-	// The maximum header size is 14, but due to the 2 hop reads,
-	// after first hop that reads first 2 constant bytes, we could reuse 2 bytes.
-	// So 14 - 2 = 12.
-	bts := make([]byte, 2, MaxHeaderSize-2)
-
-	// Prepare to hold first 2 bytes to choose size of next read.
-	_, err = io.ReadFull(r, bts)
-	if err != nil {
-		return h, err
-	}
-
-	h.Fin = bts[0]&bit0 != 0
-	h.Rsv = (bts[0] & 0x70) >> 4
-	h.OpCode = OpCode(bts[0] & 0x0f)
-
-	var extra int
-
-	if bts[1]&bit0 != 0 {
-		h.Masked = true
-		extra += 4
-	}
-
-	length := bts[1] & 0x7f
-	switch {
-	case length < 126:
-		h.Length = int64(length)
-
-	case length == 126:
-		extra += 2
-
-	case length == 127:
-		extra += 8
-
-	default:
-		err = ErrHeaderLengthUnexpected
-		return h, err
-	}
-
-	if extra == 0 {
-		return h, err
-	}
-
-	// Increase len of bts to extra bytes need to read.
-	// Overwrite first 2 bytes that was read before.
-	bts = bts[:extra]
-	_, err = io.ReadFull(r, bts)
-	if err != nil {
-		return h, err
-	}
-
-	switch {
-	case length == 126:
-		h.Length = int64(binary.BigEndian.Uint16(bts[:2]))
-		bts = bts[2:]
-
-	case length == 127:
-		if bts[0]&0x80 != 0 {
-			err = ErrHeaderLengthMSB
-			return h, err
-		}
-		h.Length = int64(binary.BigEndian.Uint64(bts[:8]))
-		bts = bts[8:]
-	}
-
-	if h.Masked {
-		copy(h.Mask[:], bts)
-	}
-
-	return h, nil
-}
-
-// ReadFrame reads a frame from r.
-// It is not designed for high optimized use case cause it makes allocation
-// for frame.Header.Length size inside to read frame payload into.
-//
-// Note that ReadFrame does not unmask payload.
-func ReadFrame(r io.Reader) (f Frame, err error) {
-	f.Header, err = ReadHeader(r)
-	if err != nil {
-		return f, err
-	}
-
-	if f.Header.Length > 0 {
-		// int(f.Header.Length) is safe here cause we have
-		// checked it for overflow above in ReadHeader.
-		f.Payload = make([]byte, int(f.Header.Length))
-		_, err = io.ReadFull(r, f.Payload)
-	}
-
-	return f, err
-}
-
-// MustReadFrame is like ReadFrame but panics if frame can not be read.
-func MustReadFrame(r io.Reader) Frame {
-	f, err := ReadFrame(r)
-	if err != nil {
-		panic(err)
-	}
-	return f
-}
-
-// ParseCloseFrameData parses close frame status code and closure reason if any provided.
-// If there is no status code in the payload
-// the empty status code is returned (code.Empty()) with empty string as a reason.
-func ParseCloseFrameData(payload []byte) (code StatusCode, reason string) {
-	if len(payload) < 2 {
-		// We returning empty StatusCode here, preventing the situation
-		// when endpoint really sent code 1005 and we should return ProtocolError on that.
-		//
-		// In other words, we ignoring this rule [RFC6455:7.1.5]:
-		//   If this Close control frame contains no status code, _The WebSocket
-		//   Connection Close Code_ is considered to be 1005.
-		return code, reason
-	}
-	code = StatusCode(binary.BigEndian.Uint16(payload))
-	reason = string(payload[2:])
-	return code, reason
-}
-
-// ParseCloseFrameDataUnsafe is like ParseCloseFrameData except the thing
-// that it does not copies payload bytes into reason, but prepares unsafe cast.
-func ParseCloseFrameDataUnsafe(payload []byte) (code StatusCode, reason string) {
-	if len(payload) < 2 {
-		return code, reason
-	}
-	code = StatusCode(binary.BigEndian.Uint16(payload))
-	reason = btsToString(payload[2:])
-	return code, reason
-}
diff --git a/vendor/github.com/gobwas/ws/server.go b/vendor/github.com/gobwas/ws/server.go
deleted file mode 100644
index 863bb22..0000000
--- a/vendor/github.com/gobwas/ws/server.go
+++ /dev/null
@@ -1,658 +0,0 @@
-package ws
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"net"
-	"net/http"
-	"strings"
-	"time"
-
-	"github.com/gobwas/httphead"
-	"github.com/gobwas/pool/pbufio"
-)
-
-// Constants used by ConnUpgrader.
-const (
-	DefaultServerReadBufferSize  = 4096
-	DefaultServerWriteBufferSize = 512
-)
-
-// Errors used by both client and server when preparing WebSocket handshake.
-var (
-	ErrHandshakeBadProtocol = RejectConnectionError(
-		RejectionStatus(http.StatusHTTPVersionNotSupported),
-		RejectionReason("handshake error: bad HTTP protocol version"),
-	)
-	ErrHandshakeBadMethod = RejectConnectionError(
-		RejectionStatus(http.StatusMethodNotAllowed),
-		RejectionReason("handshake error: bad HTTP request method"),
-	)
-	ErrHandshakeBadHost = RejectConnectionError(
-		RejectionStatus(http.StatusBadRequest),
-		RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerHost)),
-	)
-	ErrHandshakeBadUpgrade = RejectConnectionError(
-		RejectionStatus(http.StatusBadRequest),
-		RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerUpgrade)),
-	)
-	ErrHandshakeBadConnection = RejectConnectionError(
-		RejectionStatus(http.StatusBadRequest),
-		RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerConnection)),
-	)
-	ErrHandshakeBadSecAccept = RejectConnectionError(
-		RejectionStatus(http.StatusBadRequest),
-		RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecAccept)),
-	)
-	ErrHandshakeBadSecKey = RejectConnectionError(
-		RejectionStatus(http.StatusBadRequest),
-		RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecKey)),
-	)
-	ErrHandshakeBadSecVersion = RejectConnectionError(
-		RejectionStatus(http.StatusBadRequest),
-		RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecVersion)),
-	)
-)
-
-// ErrMalformedResponse is returned by Dialer to indicate that server response
-// can not be parsed.
-var ErrMalformedResponse = fmt.Errorf("malformed HTTP response")
-
-// ErrMalformedRequest is returned when HTTP request can not be parsed.
-var ErrMalformedRequest = RejectConnectionError(
-	RejectionStatus(http.StatusBadRequest),
-	RejectionReason("malformed HTTP request"),
-)
-
-// ErrHandshakeUpgradeRequired is returned by Upgrader to indicate that
-// connection is rejected because given WebSocket version is malformed.
-//
-// According to RFC6455:
-// If this version does not match a version understood by the server, the
-// server MUST abort the WebSocket handshake described in this section and
-// instead send an appropriate HTTP error code (such as 426 Upgrade Required)
-// and a |Sec-WebSocket-Version| header field indicating the version(s) the
-// server is capable of understanding.
-var ErrHandshakeUpgradeRequired = RejectConnectionError(
-	RejectionStatus(http.StatusUpgradeRequired),
-	RejectionHeader(HandshakeHeaderString(headerSecVersion+": 13\r\n")),
-	RejectionReason(fmt.Sprintf("handshake error: bad %q header", headerSecVersion)),
-)
-
-// ErrNotHijacker is an error returned when http.ResponseWriter does not
-// implement http.Hijacker interface.
-var ErrNotHijacker = RejectConnectionError(
-	RejectionStatus(http.StatusInternalServerError),
-	RejectionReason("given http.ResponseWriter is not a http.Hijacker"),
-)
-
-// DefaultHTTPUpgrader is an HTTPUpgrader that holds no options and is used by
-// UpgradeHTTP function.
-var DefaultHTTPUpgrader HTTPUpgrader
-
-// UpgradeHTTP is like HTTPUpgrader{}.Upgrade().
-func UpgradeHTTP(r *http.Request, w http.ResponseWriter) (net.Conn, *bufio.ReadWriter, Handshake, error) {
-	return DefaultHTTPUpgrader.Upgrade(r, w)
-}
-
-// DefaultUpgrader is an Upgrader that holds no options and is used by Upgrade
-// function.
-var DefaultUpgrader Upgrader
-
-// Upgrade is like Upgrader{}.Upgrade().
-func Upgrade(conn io.ReadWriter) (Handshake, error) {
-	return DefaultUpgrader.Upgrade(conn)
-}
-
-// HTTPUpgrader contains options for upgrading connection to websocket from
-// net/http Handler arguments.
-type HTTPUpgrader struct {
-	// Timeout is the maximum amount of time an Upgrade() will spent while
-	// writing handshake response.
-	//
-	// The default is no timeout.
-	Timeout time.Duration
-
-	// Header is an optional http.Header mapping that could be used to
-	// write additional headers to the handshake response.
-	//
-	// Note that if present, it will be written in any result of handshake.
-	Header http.Header
-
-	// Protocol is the select function that is used to select subprotocol from
-	// list requested by client. If this field is set, then the first matched
-	// protocol is sent to a client as negotiated.
-	Protocol func(string) bool
-
-	// Extension is the select function that is used to select extensions from
-	// list requested by client. If this field is set, then the all matched
-	// extensions are sent to a client as negotiated.
-	//
-	// Deprecated: use Negotiate instead.
-	Extension func(httphead.Option) bool
-
-	// Negotiate is the callback that is used to negotiate extensions from
-	// the client's offer. If this field is set, then the returned non-zero
-	// extensions are sent to the client as accepted extensions in the
-	// response.
-	//
-	// The argument is only valid until the Negotiate callback returns.
-	//
-	// If returned error is non-nil then connection is rejected and response is
-	// sent with appropriate HTTP error code and body set to error message.
-	//
-	// RejectConnectionError could be used to get more control on response.
-	Negotiate func(httphead.Option) (httphead.Option, error)
-}
-
-// Upgrade upgrades http connection to the websocket connection.
-//
-// It hijacks net.Conn from w and returns received net.Conn and
-// bufio.ReadWriter. On successful handshake it returns Handshake struct
-// describing handshake info.
-func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net.Conn, rw *bufio.ReadWriter, hs Handshake, err error) {
-	// Hijack connection first to get the ability to write rejection errors the
-	// same way as in Upgrader.
-	conn, rw, err = hijack(w)
-	if err != nil {
-		httpError(w, err.Error(), http.StatusInternalServerError)
-		return conn, rw, hs, err
-	}
-
-	// See https://tools.ietf.org/html/rfc6455#section-4.1
-	// The method of the request MUST be GET, and the HTTP version MUST be at least 1.1.
-	var nonce string
-	if r.Method != http.MethodGet {
-		err = ErrHandshakeBadMethod
-	} else if r.ProtoMajor < 1 || (r.ProtoMajor == 1 && r.ProtoMinor < 1) {
-		err = ErrHandshakeBadProtocol
-	} else if r.Host == "" {
-		err = ErrHandshakeBadHost
-	} else if u := httpGetHeader(r.Header, headerUpgradeCanonical); u != "websocket" && !strings.EqualFold(u, "websocket") {
-		err = ErrHandshakeBadUpgrade
-	} else if c := httpGetHeader(r.Header, headerConnectionCanonical); c != "Upgrade" && !strHasToken(c, "upgrade") {
-		err = ErrHandshakeBadConnection
-	} else if nonce = httpGetHeader(r.Header, headerSecKeyCanonical); len(nonce) != nonceSize {
-		err = ErrHandshakeBadSecKey
-	} else if v := httpGetHeader(r.Header, headerSecVersionCanonical); v != "13" {
-		// According to RFC6455:
-		//
-		// If this version does not match a version understood by the server,
-		// the server MUST abort the WebSocket handshake described in this
-		// section and instead send an appropriate HTTP error code (such as 426
-		// Upgrade Required) and a |Sec-WebSocket-Version| header field
-		// indicating the version(s) the server is capable of understanding.
-		//
-		// So we branching here cause empty or not present version does not
-		// meet the ABNF rules of RFC6455:
-		//
-		// version = DIGIT | (NZDIGIT DIGIT) |
-		// ("1" DIGIT DIGIT) | ("2" DIGIT DIGIT)
-		// ; Limited to 0-255 range, with no leading zeros
-		//
-		// That is, if version is really invalid – we sent 426 status, if it
-		// not present or empty – it is 400.
-		if v != "" {
-			err = ErrHandshakeUpgradeRequired
-		} else {
-			err = ErrHandshakeBadSecVersion
-		}
-	}
-	if check := u.Protocol; err == nil && check != nil {
-		ps := r.Header[headerSecProtocolCanonical]
-		for i := 0; i < len(ps) && err == nil && hs.Protocol == ""; i++ {
-			var ok bool
-			hs.Protocol, ok = strSelectProtocol(ps[i], check)
-			if !ok {
-				err = ErrMalformedRequest
-			}
-		}
-	}
-	if f := u.Negotiate; err == nil && f != nil {
-		for _, h := range r.Header[headerSecExtensionsCanonical] {
-			hs.Extensions, err = negotiateExtensions(strToBytes(h), hs.Extensions, f)
-			if err != nil {
-				break
-			}
-		}
-	}
-	// DEPRECATED path.
-	if check := u.Extension; err == nil && check != nil && u.Negotiate == nil {
-		xs := r.Header[headerSecExtensionsCanonical]
-		for i := 0; i < len(xs) && err == nil; i++ {
-			var ok bool
-			hs.Extensions, ok = btsSelectExtensions(strToBytes(xs[i]), hs.Extensions, check)
-			if !ok {
-				err = ErrMalformedRequest
-			}
-		}
-	}
-
-	// Clear deadlines set by server.
-	conn.SetDeadline(noDeadline)
-	if t := u.Timeout; t != 0 {
-		conn.SetWriteDeadline(time.Now().Add(t))
-		defer conn.SetWriteDeadline(noDeadline)
-	}
-
-	var header handshakeHeader
-	if h := u.Header; h != nil {
-		header[0] = HandshakeHeaderHTTP(h)
-	}
-	if err == nil {
-		httpWriteResponseUpgrade(rw.Writer, strToBytes(nonce), hs, header.WriteTo)
-		err = rw.Writer.Flush()
-	} else {
-		var code int
-		if rej, ok := err.(*ConnectionRejectedError); ok {
-			code = rej.code
-			header[1] = rej.header
-		}
-		if code == 0 {
-			code = http.StatusInternalServerError
-		}
-		httpWriteResponseError(rw.Writer, err, code, header.WriteTo)
-		// Do not store Flush() error to not override already existing one.
-		_ = rw.Writer.Flush()
-	}
-	return conn, rw, hs, err
-}
-
-// Upgrader contains options for upgrading connection to websocket.
-type Upgrader struct {
-	// ReadBufferSize and WriteBufferSize is an I/O buffer sizes.
-	// They used to read and write http data while upgrading to WebSocket.
-	// Allocated buffers are pooled with sync.Pool to avoid extra allocations.
-	//
-	// If a size is zero then default value is used.
-	//
-	// Usually it is useful to set read buffer size bigger than write buffer
-	// size because incoming request could contain long header values, such as
-	// Cookie. Response, in other way, could be big only if user write multiple
-	// custom headers. Usually response takes less than 256 bytes.
-	ReadBufferSize, WriteBufferSize int
-
-	// Protocol is a select function that is used to select subprotocol
-	// from list requested by client. If this field is set, then the first matched
-	// protocol is sent to a client as negotiated.
-	//
-	// The argument is only valid until the callback returns.
-	Protocol func([]byte) bool
-
-	// ProtocolCustrom allow user to parse Sec-WebSocket-Protocol header manually.
-	// Note that returned bytes must be valid until Upgrade returns.
-	// If ProtocolCustom is set, it used instead of Protocol function.
-	ProtocolCustom func([]byte) (string, bool)
-
-	// Extension is a select function that is used to select extensions
-	// from list requested by client. If this field is set, then the all matched
-	// extensions are sent to a client as negotiated.
-	//
-	// Note that Extension may be called multiple times and implementations
-	// must track uniqueness of accepted extensions manually.
-	//
-	// The argument is only valid until the callback returns.
-	//
-	// According to the RFC6455 order of extensions passed by a client is
-	// significant. That is, returning true from this function means that no
-	// other extension with the same name should be checked because server
-	// accepted the most preferable extension right now:
-	// "Note that the order of extensions is significant.  Any interactions between
-	// multiple extensions MAY be defined in the documents defining the extensions.
-	// In the absence of such definitions, the interpretation is that the header
-	// fields listed by the client in its request represent a preference of the
-	// header fields it wishes to use, with the first options listed being most
-	// preferable."
-	//
-	// Deprecated: use Negotiate instead.
-	Extension func(httphead.Option) bool
-
-	// ExtensionCustom allow user to parse Sec-WebSocket-Extensions header
-	// manually.
-	//
-	// If ExtensionCustom() decides to accept received extension, it must
-	// append appropriate option to the given slice of httphead.Option.
-	// It returns results of append() to the given slice and a flag that
-	// reports whether given header value is wellformed or not.
-	//
-	// Note that ExtensionCustom may be called multiple times and
-	// implementations must track uniqueness of accepted extensions manually.
-	//
-	// Note that returned options should be valid until Upgrade returns.
-	// If ExtensionCustom is set, it used instead of Extension function.
-	ExtensionCustom func([]byte, []httphead.Option) ([]httphead.Option, bool)
-
-	// Negotiate is the callback that is used to negotiate extensions from
-	// the client's offer. If this field is set, then the returned non-zero
-	// extensions are sent to the client as accepted extensions in the
-	// response.
-	//
-	// The argument is only valid until the Negotiate callback returns.
-	//
-	// If returned error is non-nil then connection is rejected and response is
-	// sent with appropriate HTTP error code and body set to error message.
-	//
-	// RejectConnectionError could be used to get more control on response.
-	Negotiate func(httphead.Option) (httphead.Option, error)
-
-	// Header is an optional HandshakeHeader instance that could be used to
-	// write additional headers to the handshake response.
-	//
-	// It used instead of any key-value mappings to avoid allocations in user
-	// land.
-	//
-	// Note that if present, it will be written in any result of handshake.
-	Header HandshakeHeader
-
-	// OnRequest is a callback that will be called after request line
-	// successful parsing.
-	//
-	// The arguments are only valid until the callback returns.
-	//
-	// If returned error is non-nil then connection is rejected and response is
-	// sent with appropriate HTTP error code and body set to error message.
-	//
-	// RejectConnectionError could be used to get more control on response.
-	OnRequest func(uri []byte) error
-
-	// OnHost is a callback that will be called after "Host" header successful
-	// parsing.
-	//
-	// It is separated from OnHeader callback because the Host header must be
-	// present in each request since HTTP/1.1. Thus Host header is non-optional
-	// and required for every WebSocket handshake.
-	//
-	// The arguments are only valid until the callback returns.
-	//
-	// If returned error is non-nil then connection is rejected and response is
-	// sent with appropriate HTTP error code and body set to error message.
-	//
-	// RejectConnectionError could be used to get more control on response.
-	OnHost func(host []byte) error
-
-	// OnHeader is a callback that will be called after successful parsing of
-	// header, that is not used during WebSocket handshake procedure. That is,
-	// it will be called with non-websocket headers, which could be relevant
-	// for application-level logic.
-	//
-	// The arguments are only valid until the callback returns.
-	//
-	// If returned error is non-nil then connection is rejected and response is
-	// sent with appropriate HTTP error code and body set to error message.
-	//
-	// RejectConnectionError could be used to get more control on response.
-	OnHeader func(key, value []byte) error
-
-	// OnBeforeUpgrade is a callback that will be called before sending
-	// successful upgrade response.
-	//
-	// Setting OnBeforeUpgrade allows user to make final application-level
-	// checks and decide whether this connection is allowed to successfully
-	// upgrade to WebSocket.
-	//
-	// It must return non-nil either HandshakeHeader or error and never both.
-	//
-	// If returned error is non-nil then connection is rejected and response is
-	// sent with appropriate HTTP error code and body set to error message.
-	//
-	// RejectConnectionError could be used to get more control on response.
-	OnBeforeUpgrade func() (header HandshakeHeader, err error)
-}
-
-// Upgrade zero-copy upgrades connection to WebSocket. It interprets given conn
-// as connection with incoming HTTP Upgrade request.
-//
-// It is a caller responsibility to manage i/o timeouts on conn.
-//
-// Non-nil error means that request for the WebSocket upgrade is invalid or
-// malformed and usually connection should be closed.
-// Even when error is non-nil Upgrade will write appropriate response into
-// connection in compliance with RFC.
-func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) {
-	// headerSeen constants helps to report whether or not some header was seen
-	// during reading request bytes.
-	const (
-		headerSeenHost = 1 << iota
-		headerSeenUpgrade
-		headerSeenConnection
-		headerSeenSecVersion
-		headerSeenSecKey
-
-		// headerSeenAll is the value that we expect to receive at the end of
-		// headers read/parse loop.
-		headerSeenAll = 0 |
-			headerSeenHost |
-			headerSeenUpgrade |
-			headerSeenConnection |
-			headerSeenSecVersion |
-			headerSeenSecKey
-	)
-
-	// Prepare I/O buffers.
-	// TODO(gobwas): make it configurable.
-	br := pbufio.GetReader(conn,
-		nonZero(u.ReadBufferSize, DefaultServerReadBufferSize),
-	)
-	bw := pbufio.GetWriter(conn,
-		nonZero(u.WriteBufferSize, DefaultServerWriteBufferSize),
-	)
-	defer func() {
-		pbufio.PutReader(br)
-		pbufio.PutWriter(bw)
-	}()
-
-	// Read HTTP request line like "GET /ws HTTP/1.1".
-	rl, err := readLine(br)
-	if err != nil {
-		return hs, err
-	}
-	// Parse request line data like HTTP version, uri and method.
-	req, err := httpParseRequestLine(rl)
-	if err != nil {
-		return hs, err
-	}
-
-	// Prepare stack-based handshake header list.
-	header := handshakeHeader{
-		0: u.Header,
-	}
-
-	// Parse and check HTTP request.
-	// As RFC6455 says:
-	//   The client's opening handshake consists of the following parts. If the
-	//   server, while reading the handshake, finds that the client did not
-	//   send a handshake that matches the description below (note that as per
-	//   [RFC2616], the order of the header fields is not important), including
-	//   but not limited to any violations of the ABNF grammar specified for
-	//   the components of the handshake, the server MUST stop processing the
-	//   client's handshake and return an HTTP response with an appropriate
-	//   error code (such as 400 Bad Request).
-	//
-	// See https://tools.ietf.org/html/rfc6455#section-4.2.1
-
-	// An HTTP/1.1 or higher GET request, including a "Request-URI".
-	//
-	// Even if RFC says "1.1 or higher" without mentioning the part of the
-	// version, we apply it only to minor part.
-	switch {
-	case req.major != 1 || req.minor < 1:
-		// Abort processing the whole request because we do not even know how
-		// to actually parse it.
-		err = ErrHandshakeBadProtocol
-
-	case btsToString(req.method) != http.MethodGet:
-		err = ErrHandshakeBadMethod
-
-	default:
-		if onRequest := u.OnRequest; onRequest != nil {
-			err = onRequest(req.uri)
-		}
-	}
-	// Start headers read/parse loop.
-	var (
-		// headerSeen reports which header was seen by setting corresponding
-		// bit on.
-		headerSeen byte
-
-		nonce = make([]byte, nonceSize)
-	)
-	for err == nil {
-		line, e := readLine(br)
-		if e != nil {
-			return hs, e
-		}
-		if len(line) == 0 {
-			// Blank line, no more lines to read.
-			break
-		}
-
-		k, v, ok := httpParseHeaderLine(line)
-		if !ok {
-			err = ErrMalformedRequest
-			break
-		}
-
-		switch btsToString(k) {
-		case headerHostCanonical:
-			headerSeen |= headerSeenHost
-			if onHost := u.OnHost; onHost != nil {
-				err = onHost(v)
-			}
-
-		case headerUpgradeCanonical:
-			headerSeen |= headerSeenUpgrade
-			if !bytes.Equal(v, specHeaderValueUpgrade) && !bytes.EqualFold(v, specHeaderValueUpgrade) {
-				err = ErrHandshakeBadUpgrade
-			}
-
-		case headerConnectionCanonical:
-			headerSeen |= headerSeenConnection
-			if !bytes.Equal(v, specHeaderValueConnection) && !btsHasToken(v, specHeaderValueConnectionLower) {
-				err = ErrHandshakeBadConnection
-			}
-
-		case headerSecVersionCanonical:
-			headerSeen |= headerSeenSecVersion
-			if !bytes.Equal(v, specHeaderValueSecVersion) {
-				err = ErrHandshakeUpgradeRequired
-			}
-
-		case headerSecKeyCanonical:
-			headerSeen |= headerSeenSecKey
-			if len(v) != nonceSize {
-				err = ErrHandshakeBadSecKey
-			} else {
-				copy(nonce, v)
-			}
-
-		case headerSecProtocolCanonical:
-			if custom, check := u.ProtocolCustom, u.Protocol; hs.Protocol == "" && (custom != nil || check != nil) {
-				var ok bool
-				if custom != nil {
-					hs.Protocol, ok = custom(v)
-				} else {
-					hs.Protocol, ok = btsSelectProtocol(v, check)
-				}
-				if !ok {
-					err = ErrMalformedRequest
-				}
-			}
-
-		case headerSecExtensionsCanonical:
-			if f := u.Negotiate; err == nil && f != nil {
-				hs.Extensions, err = negotiateExtensions(v, hs.Extensions, f)
-			}
-			// DEPRECATED path.
-			if custom, check := u.ExtensionCustom, u.Extension; u.Negotiate == nil && (custom != nil || check != nil) {
-				var ok bool
-				if custom != nil {
-					hs.Extensions, ok = custom(v, hs.Extensions)
-				} else {
-					hs.Extensions, ok = btsSelectExtensions(v, hs.Extensions, check)
-				}
-				if !ok {
-					err = ErrMalformedRequest
-				}
-			}
-
-		default:
-			if onHeader := u.OnHeader; onHeader != nil {
-				err = onHeader(k, v)
-			}
-		}
-	}
-	switch {
-	case err == nil && headerSeen != headerSeenAll:
-		switch {
-		case headerSeen&headerSeenHost == 0:
-			// As RFC2616 says:
-			//   A client MUST include a Host header field in all HTTP/1.1
-			//   request messages. If the requested URI does not include an
-			//   Internet host name for the service being requested, then the
-			//   Host header field MUST be given with an empty value. An
-			//   HTTP/1.1 proxy MUST ensure that any request message it
-			//   forwards does contain an appropriate Host header field that
-			//   identifies the service being requested by the proxy. All
-			//   Internet-based HTTP/1.1 servers MUST respond with a 400 (Bad
-			//   Request) status code to any HTTP/1.1 request message which
-			//   lacks a Host header field.
-			err = ErrHandshakeBadHost
-		case headerSeen&headerSeenUpgrade == 0:
-			err = ErrHandshakeBadUpgrade
-		case headerSeen&headerSeenConnection == 0:
-			err = ErrHandshakeBadConnection
-		case headerSeen&headerSeenSecVersion == 0:
-			// In case of empty or not present version we do not send 426 status,
-			// because it does not meet the ABNF rules of RFC6455:
-			//
-			// version = DIGIT | (NZDIGIT DIGIT) |
-			// ("1" DIGIT DIGIT) | ("2" DIGIT DIGIT)
-			// ; Limited to 0-255 range, with no leading zeros
-			//
-			// That is, if version is really invalid – we sent 426 status as above, if it
-			// not present – it is 400.
-			err = ErrHandshakeBadSecVersion
-		case headerSeen&headerSeenSecKey == 0:
-			err = ErrHandshakeBadSecKey
-		default:
-			panic("unknown headers state")
-		}
-
-	case err == nil && u.OnBeforeUpgrade != nil:
-		header[1], err = u.OnBeforeUpgrade()
-	}
-	if err != nil {
-		var code int
-		if rej, ok := err.(*ConnectionRejectedError); ok {
-			code = rej.code
-			header[1] = rej.header
-		}
-		if code == 0 {
-			code = http.StatusInternalServerError
-		}
-		httpWriteResponseError(bw, err, code, header.WriteTo)
-		// Do not store Flush() error to not override already existing one.
-		_ = bw.Flush()
-		return hs, err
-	}
-
-	httpWriteResponseUpgrade(bw, nonce, hs, header.WriteTo)
-	err = bw.Flush()
-
-	return hs, err
-}
-
-type handshakeHeader [2]HandshakeHeader
-
-func (hs handshakeHeader) WriteTo(w io.Writer) (n int64, err error) {
-	for i := 0; i < len(hs) && err == nil; i++ {
-		if h := hs[i]; h != nil {
-			var m int64
-			m, err = h.WriteTo(w)
-			n += m
-		}
-	}
-	return n, err
-}
diff --git a/vendor/github.com/gobwas/ws/util.go b/vendor/github.com/gobwas/ws/util.go
deleted file mode 100644
index 1dd5aa6..0000000
--- a/vendor/github.com/gobwas/ws/util.go
+++ /dev/null
@@ -1,199 +0,0 @@
-package ws
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-
-	"github.com/gobwas/httphead"
-)
-
-// SelectFromSlice creates accept function that could be used as Protocol/Extension
-// select during upgrade.
-func SelectFromSlice(accept []string) func(string) bool {
-	if len(accept) > 16 {
-		mp := make(map[string]struct{}, len(accept))
-		for _, p := range accept {
-			mp[p] = struct{}{}
-		}
-		return func(p string) bool {
-			_, ok := mp[p]
-			return ok
-		}
-	}
-	return func(p string) bool {
-		for _, ok := range accept {
-			if p == ok {
-				return true
-			}
-		}
-		return false
-	}
-}
-
-// SelectEqual creates accept function that could be used as Protocol/Extension
-// select during upgrade.
-func SelectEqual(v string) func(string) bool {
-	return func(p string) bool {
-		return v == p
-	}
-}
-
-// asciiToInt converts bytes to int.
-func asciiToInt(bts []byte) (ret int, err error) {
-	// ASCII numbers all start with the high-order bits 0011.
-	// If you see that, and the next bits are 0-9 (0000 - 1001) you can grab those
-	// bits and interpret them directly as an integer.
-	var n int
-	if n = len(bts); n < 1 {
-		return 0, fmt.Errorf("converting empty bytes to int")
-	}
-	for i := 0; i < n; i++ {
-		if bts[i]&0xf0 != 0x30 {
-			return 0, fmt.Errorf("%s is not a numeric character", string(bts[i]))
-		}
-		ret += int(bts[i]&0xf) * pow(10, n-i-1)
-	}
-	return ret, nil
-}
-
-// pow for integers implementation.
-// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3.
-func pow(a, b int) int {
-	p := 1
-	for b > 0 {
-		if b&1 != 0 {
-			p *= a
-		}
-		b >>= 1
-		a *= a
-	}
-	return p
-}
-
-func bsplit3(bts []byte, sep byte) (b1, b2, b3 []byte) {
-	a := bytes.IndexByte(bts, sep)
-	b := bytes.IndexByte(bts[a+1:], sep)
-	if a == -1 || b == -1 {
-		return bts, nil, nil
-	}
-	b += a + 1
-	return bts[:a], bts[a+1 : b], bts[b+1:]
-}
-
-func btrim(bts []byte) []byte {
-	var i, j int
-	for i = 0; i < len(bts) && (bts[i] == ' ' || bts[i] == '\t'); {
-		i++
-	}
-	for j = len(bts); j > i && (bts[j-1] == ' ' || bts[j-1] == '\t'); {
-		j--
-	}
-	return bts[i:j]
-}
-
-func strHasToken(header, token string) (has bool) {
-	return btsHasToken(strToBytes(header), strToBytes(token))
-}
-
-func btsHasToken(header, token []byte) (has bool) {
-	httphead.ScanTokens(header, func(v []byte) bool {
-		has = bytes.EqualFold(v, token)
-		return !has
-	})
-	return has
-}
-
-const (
-	toLower  = 'a' - 'A'      // for use with OR.
-	toUpper  = ^byte(toLower) // for use with AND.
-	toLower8 = uint64(toLower) |
-		uint64(toLower)<<8 |
-		uint64(toLower)<<16 |
-		uint64(toLower)<<24 |
-		uint64(toLower)<<32 |
-		uint64(toLower)<<40 |
-		uint64(toLower)<<48 |
-		uint64(toLower)<<56
-)
-
-// Algorithm below is like standard textproto/CanonicalMIMEHeaderKey, except
-// that it operates with slice of bytes and modifies it inplace without copying.
-func canonicalizeHeaderKey(k []byte) {
-	upper := true
-	for i, c := range k {
-		if upper && 'a' <= c && c <= 'z' {
-			k[i] &= toUpper
-		} else if !upper && 'A' <= c && c <= 'Z' {
-			k[i] |= toLower
-		}
-		upper = c == '-'
-	}
-}
-
-// readLine reads line from br. It reads until '\n' and returns bytes without
-// '\n' or '\r\n' at the end.
-// It returns err if and only if line does not end in '\n'. Note that read
-// bytes returned in any case of error.
-//
-// It is much like the textproto/Reader.ReadLine() except the thing that it
-// returns raw bytes, instead of string. That is, it avoids copying bytes read
-// from br.
-//
-// textproto/Reader.ReadLineBytes() is also makes copy of resulting bytes to be
-// safe with future I/O operations on br.
-//
-// We could control I/O operations on br and do not need to make additional
-// copy for safety.
-//
-// NOTE: it may return copied flag to notify that returned buffer is safe to
-// use.
-func readLine(br *bufio.Reader) ([]byte, error) {
-	var line []byte
-	for {
-		bts, err := br.ReadSlice('\n')
-		if err == bufio.ErrBufferFull {
-			// Copy bytes because next read will discard them.
-			line = append(line, bts...)
-			continue
-		}
-
-		// Avoid copy of single read.
-		if line == nil {
-			line = bts
-		} else {
-			line = append(line, bts...)
-		}
-
-		if err != nil {
-			return line, err
-		}
-
-		// Size of line is at least 1.
-		// In other case bufio.ReadSlice() returns error.
-		n := len(line)
-
-		// Cut '\n' or '\r\n'.
-		if n > 1 && line[n-2] == '\r' {
-			line = line[:n-2]
-		} else {
-			line = line[:n-1]
-		}
-
-		return line, nil
-	}
-}
-
-func min(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
-
-func nonZero(a, b int) int {
-	if a != 0 {
-		return a
-	}
-	return b
-}
diff --git a/vendor/github.com/gobwas/ws/util_purego.go b/vendor/github.com/gobwas/ws/util_purego.go
deleted file mode 100644
index 449b3fd..0000000
--- a/vendor/github.com/gobwas/ws/util_purego.go
+++ /dev/null
@@ -1,12 +0,0 @@
-//go:build purego
-// +build purego
-
-package ws
-
-func strToBytes(str string) (bts []byte) {
-	return []byte(str)
-}
-
-func btsToString(bts []byte) (str string) {
-	return string(bts)
-}
diff --git a/vendor/github.com/gobwas/ws/util_unsafe.go b/vendor/github.com/gobwas/ws/util_unsafe.go
deleted file mode 100644
index b732297..0000000
--- a/vendor/github.com/gobwas/ws/util_unsafe.go
+++ /dev/null
@@ -1,22 +0,0 @@
-//go:build !purego
-// +build !purego
-
-package ws
-
-import (
-	"reflect"
-	"unsafe"
-)
-
-func strToBytes(str string) (bts []byte) {
-	s := (*reflect.StringHeader)(unsafe.Pointer(&str))
-	b := (*reflect.SliceHeader)(unsafe.Pointer(&bts))
-	b.Data = s.Data
-	b.Len = s.Len
-	b.Cap = s.Len
-	return bts
-}
-
-func btsToString(bts []byte) (str string) {
-	return *(*string)(unsafe.Pointer(&bts))
-}
diff --git a/vendor/github.com/gobwas/ws/write.go b/vendor/github.com/gobwas/ws/write.go
deleted file mode 100644
index 94557c6..0000000
--- a/vendor/github.com/gobwas/ws/write.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package ws
-
-import (
-	"encoding/binary"
-	"io"
-)
-
-// Header size length bounds in bytes.
-const (
-	MaxHeaderSize = 14
-	MinHeaderSize = 2
-)
-
-const (
-	bit0 = 0x80
-	bit1 = 0x40
-	bit2 = 0x20
-	bit3 = 0x10
-	bit4 = 0x08
-	bit5 = 0x04
-	bit6 = 0x02
-	bit7 = 0x01
-
-	len7  = int64(125)
-	len16 = int64(^(uint16(0)))
-	len64 = int64(^(uint64(0)) >> 1)
-)
-
-// HeaderSize returns number of bytes that are needed to encode given header.
-// It returns -1 if header is malformed.
-func HeaderSize(h Header) (n int) {
-	switch {
-	case h.Length < 126:
-		n = 2
-	case h.Length <= len16:
-		n = 4
-	case h.Length <= len64:
-		n = 10
-	default:
-		return -1
-	}
-	if h.Masked {
-		n += len(h.Mask)
-	}
-	return n
-}
-
-// WriteHeader writes header binary representation into w.
-func WriteHeader(w io.Writer, h Header) error {
-	// Make slice of bytes with capacity 14 that could hold any header.
-	bts := make([]byte, MaxHeaderSize)
-
-	if h.Fin {
-		bts[0] |= bit0
-	}
-	bts[0] |= h.Rsv << 4
-	bts[0] |= byte(h.OpCode)
-
-	var n int
-	switch {
-	case h.Length <= len7:
-		bts[1] = byte(h.Length)
-		n = 2
-
-	case h.Length <= len16:
-		bts[1] = 126
-		binary.BigEndian.PutUint16(bts[2:4], uint16(h.Length))
-		n = 4
-
-	case h.Length <= len64:
-		bts[1] = 127
-		binary.BigEndian.PutUint64(bts[2:10], uint64(h.Length))
-		n = 10
-
-	default:
-		return ErrHeaderLengthUnexpected
-	}
-
-	if h.Masked {
-		bts[1] |= bit0
-		n += copy(bts[n:], h.Mask[:])
-	}
-
-	_, err := w.Write(bts[:n])
-
-	return err
-}
-
-// WriteFrame writes frame binary representation into w.
-func WriteFrame(w io.Writer, f Frame) error {
-	err := WriteHeader(w, f.Header)
-	if err != nil {
-		return err
-	}
-	_, err = w.Write(f.Payload)
-	return err
-}
-
-// MustWriteFrame is like WriteFrame but panics if frame can not be read.
-func MustWriteFrame(w io.Writer, f Frame) {
-	if err := WriteFrame(w, f); err != nil {
-		panic(err)
-	}
-}
diff --git a/vendor/github.com/gobwas/ws/wsflate/cbuf.go b/vendor/github.com/gobwas/ws/wsflate/cbuf.go
deleted file mode 100644
index 5e2c445..0000000
--- a/vendor/github.com/gobwas/ws/wsflate/cbuf.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package wsflate
-
-import (
-	"io"
-)
-
-// cbuf is a tiny proxy-buffer that writes all but 4 last bytes to the
-// destination.
-type cbuf struct {
-	buf [4]byte
-	n   int
-	dst io.Writer
-	err error
-}
-
-// Write implements io.Writer interface.
-func (c *cbuf) Write(p []byte) (int, error) {
-	if c.err != nil {
-		return 0, c.err
-	}
-	head, tail := c.split(p)
-	n := c.n + len(tail)
-	if n > len(c.buf) {
-		x := n - len(c.buf)
-		c.flush(c.buf[:x])
-		copy(c.buf[:], c.buf[x:])
-		c.n -= x
-	}
-	if len(head) > 0 {
-		c.flush(head)
-	}
-	copy(c.buf[c.n:], tail)
-	c.n = min(c.n+len(tail), len(c.buf))
-	return len(p), c.err
-}
-
-func (c *cbuf) flush(p []byte) {
-	if c.err == nil {
-		_, c.err = c.dst.Write(p)
-	}
-}
-
-func (c *cbuf) split(p []byte) (head, tail []byte) {
-	if n := len(p); n > len(c.buf) {
-		x := n - len(c.buf)
-		head = p[:x]
-		tail = p[x:]
-		return head, tail
-	}
-	return nil, p
-}
-
-func (c *cbuf) reset(dst io.Writer) {
-	c.n = 0
-	c.err = nil
-	c.buf = [4]byte{0, 0, 0, 0}
-	c.dst = dst
-}
-
-type suffixedReader struct {
-	r      io.Reader
-	pos    int // position in the suffix.
-	suffix [9]byte
-
-	rx struct{ io.Reader }
-}
-
-func (r *suffixedReader) iface() io.Reader {
-	if _, ok := r.r.(io.ByteReader); ok {
-		// If source io.Reader implements io.ByteReader, return full set of
-		// methods from suffixedReader struct (Read() and ReadByte()).
-		// This actually is an optimization needed for those Decompressor
-		// implementations (such as default flate.Reader) which do check if
-		// given source is already "buffered" by checking if source implements
-		// io.ByteReader. So without this checks we will always result in
-		// double-buffering for default decompressors.
-		return r
-	}
-	// Source io.Reader doesn't support io.ByteReader, so we should cut off the
-	// ReadByte() method from suffixedReader struct. We use r.srx field to
-	// avoid allocations.
-	r.rx.Reader = r
-	return &r.rx
-}
-
-func (r *suffixedReader) Read(p []byte) (n int, err error) {
-	if r.r != nil {
-		n, err = r.r.Read(p)
-		if err == io.EOF {
-			err = nil
-			r.r = nil
-		}
-		return n, err
-	}
-	if r.pos >= len(r.suffix) {
-		return 0, io.EOF
-	}
-	n = copy(p, r.suffix[r.pos:])
-	r.pos += n
-	return n, nil
-}
-
-func (r *suffixedReader) ReadByte() (b byte, err error) {
-	if r.r != nil {
-		br, ok := r.r.(io.ByteReader)
-		if !ok {
-			panic("wsflate: internal error: incorrect use of suffixedReader")
-		}
-		b, err = br.ReadByte()
-		if err == io.EOF {
-			err = nil
-			r.r = nil
-		}
-		return b, err
-	}
-	if r.pos >= len(r.suffix) {
-		return 0, io.EOF
-	}
-	b = r.suffix[r.pos]
-	r.pos++
-	return b, nil
-}
-
-func (r *suffixedReader) reset(src io.Reader) {
-	r.r = src
-	r.pos = 0
-}
-
-func min(a, b int) int {
-	if a < b {
-		return a
-	}
-	return b
-}
diff --git a/vendor/github.com/gobwas/ws/wsflate/extension.go b/vendor/github.com/gobwas/ws/wsflate/extension.go
deleted file mode 100644
index c8d9934..0000000
--- a/vendor/github.com/gobwas/ws/wsflate/extension.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package wsflate
-
-import (
-	"bytes"
-
-	"github.com/gobwas/httphead"
-	"github.com/gobwas/ws"
-)
-
-// Extension contains logic of compression extension parameters negotiation
-// made during HTTP WebSocket handshake.
-// It might be reused between different upgrades (but not concurrently) with
-// Reset() being called after each.
-type Extension struct {
-	// Parameters is specification of extension parameters server is going to
-	// accept.
-	Parameters Parameters
-
-	accepted bool
-	params   Parameters
-}
-
-// Negotiate parses given HTTP header option and returns (if any) header option
-// which describes accepted parameters.
-//
-// It may return zero option (i.e. one which Size() returns 0) alongside with
-// nil error.
-func (n *Extension) Negotiate(opt httphead.Option) (accept httphead.Option, err error) {
-	if !bytes.Equal(opt.Name, ExtensionNameBytes) {
-		return accept, nil
-	}
-	if n.accepted {
-		// Negotiate might be called multiple times during upgrade.
-		// We stick to first one accepted extension since they must be passed
-		// in ordered by preference.
-		return accept, nil
-	}
-
-	want := n.Parameters
-
-	// NOTE: Parse() resets params inside, so no worries.
-	if err := n.params.Parse(opt); err != nil {
-		return accept, err
-	}
-	{
-		offer := n.params.ServerMaxWindowBits
-		want := want.ServerMaxWindowBits
-		if offer > want {
-			// A server declines an extension negotiation offer
-			// with this parameter if the server doesn't support
-			// it.
-			return accept, nil
-		}
-	}
-	{
-		// If a received extension negotiation offer has the
-		// "client_max_window_bits" extension parameter, the server MAY
-		// include the "client_max_window_bits" extension parameter in the
-		// corresponding extension negotiation response to the offer.
-		offer := n.params.ClientMaxWindowBits
-		want := want.ClientMaxWindowBits
-		if want > offer {
-			return accept, nil
-		}
-	}
-	{
-		offer := n.params.ServerNoContextTakeover
-		want := want.ServerNoContextTakeover
-		if offer && !want {
-			return accept, nil
-		}
-	}
-
-	n.accepted = true
-
-	return want.Option(), nil
-}
-
-// Accepted returns parameters parsed during last negotiation and a flag that
-// reports whether they were accepted.
-func (n *Extension) Accepted() (_ Parameters, accepted bool) {
-	return n.params, n.accepted
-}
-
-// Reset resets extension for further reuse.
-func (n *Extension) Reset() {
-	n.accepted = false
-	n.params = Parameters{}
-}
-
-var ErrUnexpectedCompressionBit = ws.ProtocolError(
-	"control frame or non-first fragment of data contains compression bit set",
-)
-
-// UnsetBit clears the Per-Message Compression bit in header h and returns its
-// modified copy. It reports whether compression bit was set in header h.
-// It returns non-nil error if compression bit has unexpected value.
-//
-// This function's main purpose is to be compatible with "Framing" section of
-// the Compression Extensions for WebSocket RFC. If you don't need to work with
-// chains of extensions then IsCompressed() could be enough to check if
-// message is compressed.
-// See https://tools.ietf.org/html/rfc7692#section-6.2
-func UnsetBit(h ws.Header) (_ ws.Header, wasSet bool, err error) {
-	var s MessageState
-	h, err = s.UnsetBits(h)
-	return h, s.IsCompressed(), err
-}
-
-// SetBit sets the Per-Message Compression bit in header h and returns its
-// modified copy.
-// It returns non-nil error if compression bit has unexpected value.
-func SetBit(h ws.Header) (_ ws.Header, err error) {
-	var s MessageState
-	s.SetCompressed(true)
-	return s.SetBits(h)
-}
-
-// IsCompressed reports whether the Per-Message Compression bit is set in
-// header h.
-// It returns non-nil error if compression bit has unexpected value.
-//
-// If you need to be fully compatible with Compression Extensions for WebSocket
-// RFC and work with chains of extensions, take a look at the UnsetBit()
-// instead. That is, IsCompressed() is a shortcut for UnsetBit() with reduced
-// number of return values.
-func IsCompressed(h ws.Header) (bool, error) {
-	_, isSet, err := UnsetBit(h)
-	return isSet, err
-}
-
-// MessageState holds message compression state.
-//
-// It is consulted during SetBits(h) call to make a decision whether we must
-// set the Per-Message Compression bit for given header h argument.
-// It is updated during UnsetBits(h) to reflect compression state of a message
-// represented by header h argument.
-// It can also be consulted/updated directly by calling
-// IsCompressed()/SetCompressed().
-//
-// In general MessageState should be used when there is no direct access to
-// connection to read frame from, but it is still needed to know if message
-// being read is compressed. For other cases SetBit() and UnsetBit() should be
-// used instead.
-//
-// NOTE: the compression state is updated during UnsetBits(h) only when header
-// h argument represents data (text or binary) frame.
-type MessageState struct {
-	compressed bool
-}
-
-// SetCompressed marks message as "compressed" or "uncompressed".
-// See https://tools.ietf.org/html/rfc7692#section-6
-func (s *MessageState) SetCompressed(v bool) {
-	s.compressed = v
-}
-
-// IsCompressed reports whether message is "compressed".
-// See https://tools.ietf.org/html/rfc7692#section-6
-func (s *MessageState) IsCompressed() bool {
-	return s.compressed
-}
-
-// UnsetBits changes RSV bits of the given frame header h as if compression
-// extension was negotiated. It returns modified copy of h and error if header
-// is malformed from the RFC perspective.
-func (s *MessageState) UnsetBits(h ws.Header) (ws.Header, error) {
-	r1, r2, r3 := ws.RsvBits(h.Rsv)
-	switch {
-	case h.OpCode.IsData() && h.OpCode != ws.OpContinuation:
-		h.Rsv = ws.Rsv(false, r2, r3)
-		s.SetCompressed(r1)
-		return h, nil
-
-	case r1:
-		// An endpoint MUST NOT set the "Per-Message Compressed"
-		// bit of control frames and non-first fragments of a data
-		// message. An endpoint receiving such a frame MUST _Fail
-		// the WebSocket Connection_.
-		return h, ErrUnexpectedCompressionBit
-
-	default:
-		// NOTE: do not change the state of s.compressed since UnsetBits()
-		// might also be called for (intermediate) control frames.
-		return h, nil
-	}
-}
-
-// SetBits changes RSV bits of the frame header h which is being send as if
-// compression extension was negotiated. It returns modified copy of h and
-// error if header is malformed from the RFC perspective.
-func (s *MessageState) SetBits(h ws.Header) (ws.Header, error) {
-	r1, r2, r3 := ws.RsvBits(h.Rsv)
-	if r1 {
-		return h, ErrUnexpectedCompressionBit
-	}
-	if !h.OpCode.IsData() || h.OpCode == ws.OpContinuation {
-		// An endpoint MUST NOT set the "Per-Message Compressed"
-		// bit of control frames and non-first fragments of a data
-		// message. An endpoint receiving such a frame MUST _Fail
-		// the WebSocket Connection_.
-		return h, nil
-	}
-	if s.IsCompressed() {
-		h.Rsv = ws.Rsv(true, r2, r3)
-	}
-	return h, nil
-}
diff --git a/vendor/github.com/gobwas/ws/wsflate/helper.go b/vendor/github.com/gobwas/ws/wsflate/helper.go
deleted file mode 100644
index eae94d7..0000000
--- a/vendor/github.com/gobwas/ws/wsflate/helper.go
+++ /dev/null
@@ -1,195 +0,0 @@
-package wsflate
-
-import (
-	"bytes"
-	"compress/flate"
-	"fmt"
-	"io"
-
-	"github.com/gobwas/ws"
-)
-
-// DefaultHelper is a default helper instance holding standard library's
-// `compress/flate` compressor and decompressor under the hood.
-//
-// Note that use of DefaultHelper methods assumes that DefaultParameters were
-// used for extension negotiation during WebSocket handshake.
-var DefaultHelper = Helper{
-	Compressor: func(w io.Writer) Compressor {
-		// No error can be returned here as NewWriter() doc says.
-		f, _ := flate.NewWriter(w, 9)
-		return f
-	},
-	Decompressor: func(r io.Reader) Decompressor {
-		return flate.NewReader(r)
-	},
-}
-
-// DefaultParameters holds deflate extension parameters which are assumed by
-// DefaultHelper to be used during WebSocket handshake.
-var DefaultParameters = Parameters{
-	ServerNoContextTakeover: true,
-	ClientNoContextTakeover: true,
-}
-
-// CompressFrame is a shortcut for DefaultHelper.CompressFrame().
-//
-// Note that use of DefaultHelper methods assumes that DefaultParameters were
-// used for extension negotiation during WebSocket handshake.
-func CompressFrame(f ws.Frame) (ws.Frame, error) {
-	return DefaultHelper.CompressFrame(f)
-}
-
-// CompressFrameBuffer is a shortcut for DefaultHelper.CompressFrameBuffer().
-//
-// Note that use of DefaultHelper methods assumes that DefaultParameters were
-// used for extension negotiation during WebSocket handshake.
-func CompressFrameBuffer(buf Buffer, f ws.Frame) (ws.Frame, error) {
-	return DefaultHelper.CompressFrameBuffer(buf, f)
-}
-
-// DecompressFrame is a shortcut for DefaultHelper.DecompressFrame().
-//
-// Note that use of DefaultHelper methods assumes that DefaultParameters were
-// used for extension negotiation during WebSocket handshake.
-func DecompressFrame(f ws.Frame) (ws.Frame, error) {
-	return DefaultHelper.DecompressFrame(f)
-}
-
-// DecompressFrameBuffer is a shortcut for
-// DefaultHelper.DecompressFrameBuffer().
-//
-// Note that use of DefaultHelper methods assumes that DefaultParameters were
-// used for extension negotiation during WebSocket handshake.
-func DecompressFrameBuffer(buf Buffer, f ws.Frame) (ws.Frame, error) {
-	return DefaultHelper.DecompressFrameBuffer(buf, f)
-}
-
-// Helper is a helper struct that holds common code for compression and
-// decompression bytes or WebSocket frames.
-//
-// Its purpose is to reduce boilerplate code in WebSocket applications.
-type Helper struct {
-	Compressor   func(w io.Writer) Compressor
-	Decompressor func(r io.Reader) Decompressor
-}
-
-// Buffer is an interface representing some bytes buffering object.
-type Buffer interface {
-	io.Writer
-	Bytes() []byte
-}
-
-// CompressFrame returns compressed version of a frame.
-// Note that it does memory allocations internally. To control those
-// allocations consider using CompressFrameBuffer().
-func (h *Helper) CompressFrame(in ws.Frame) (f ws.Frame, err error) {
-	var buf bytes.Buffer
-	return h.CompressFrameBuffer(&buf, in)
-}
-
-// DecompressFrame returns decompressed version of a frame.
-// Note that it does memory allocations internally. To control those
-// allocations consider using DecompressFrameBuffer().
-func (h *Helper) DecompressFrame(in ws.Frame) (f ws.Frame, err error) {
-	var buf bytes.Buffer
-	return h.DecompressFrameBuffer(&buf, in)
-}
-
-// CompressFrameBuffer compresses a frame using given buffer.
-// Returned frame's payload holds bytes returned by buf.Bytes().
-func (h *Helper) CompressFrameBuffer(buf Buffer, f ws.Frame) (ws.Frame, error) {
-	if !f.Header.Fin {
-		return f, fmt.Errorf("wsflate: fragmented messages are not allowed")
-	}
-	if err := h.CompressTo(buf, f.Payload); err != nil {
-		return f, err
-	}
-	var err error
-	f.Payload = buf.Bytes()
-	f.Header.Length = int64(len(f.Payload))
-	f.Header, err = SetBit(f.Header)
-	if err != nil {
-		return f, err
-	}
-	return f, nil
-}
-
-// DecompressFrameBuffer decompresses a frame using given buffer.
-// Returned frame's payload holds bytes returned by buf.Bytes().
-func (h *Helper) DecompressFrameBuffer(buf Buffer, f ws.Frame) (ws.Frame, error) {
-	if !f.Header.Fin {
-		return f, fmt.Errorf(
-			"wsflate: fragmented messages are not supported by helper",
-		)
-	}
-	var (
-		compressed bool
-		err        error
-	)
-	f.Header, compressed, err = UnsetBit(f.Header)
-	if err != nil {
-		return f, err
-	}
-	if !compressed {
-		return f, nil
-	}
-	if err := h.DecompressTo(buf, f.Payload); err != nil {
-		return f, err
-	}
-
-	f.Payload = buf.Bytes()
-	f.Header.Length = int64(len(f.Payload))
-
-	return f, nil
-}
-
-// Compress compresses given bytes.
-// Note that it does memory allocations internally. To control those
-// allocations consider using CompressTo().
-func (h *Helper) Compress(p []byte) ([]byte, error) {
-	var buf bytes.Buffer
-	if err := h.CompressTo(&buf, p); err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), nil
-}
-
-// Decompress decompresses given bytes.
-// Note that it does memory allocations internally. To control those
-// allocations consider using DecompressTo().
-func (h *Helper) Decompress(p []byte) ([]byte, error) {
-	var buf bytes.Buffer
-	if err := h.DecompressTo(&buf, p); err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), nil
-}
-
-// CompressTo compresses bytes into given buffer.
-func (h *Helper) CompressTo(w io.Writer, p []byte) (err error) {
-	c := NewWriter(w, h.Compressor)
-	if _, err = c.Write(p); err != nil {
-		return err
-	}
-	if err := c.Flush(); err != nil {
-		return err
-	}
-	if err := c.Close(); err != nil {
-		return err
-	}
-	return nil
-}
-
-// DecompressTo decompresses bytes into given buffer.
-// Returned bytes are bytes returned by buf.Bytes().
-func (h *Helper) DecompressTo(w io.Writer, p []byte) (err error) {
-	fr := NewReader(bytes.NewReader(p), h.Decompressor)
-	if _, err = io.Copy(w, fr); err != nil {
-		return err
-	}
-	if err := fr.Close(); err != nil {
-		return err
-	}
-	return nil
-}
diff --git a/vendor/github.com/gobwas/ws/wsflate/parameters.go b/vendor/github.com/gobwas/ws/wsflate/parameters.go
deleted file mode 100644
index 3f2691c..0000000
--- a/vendor/github.com/gobwas/ws/wsflate/parameters.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package wsflate
-
-import (
-	"fmt"
-	"strconv"
-
-	"github.com/gobwas/httphead"
-)
-
-const (
-	ExtensionName = "permessage-deflate"
-
-	serverNoContextTakeover = "server_no_context_takeover"
-	clientNoContextTakeover = "client_no_context_takeover"
-	serverMaxWindowBits     = "server_max_window_bits"
-	clientMaxWindowBits     = "client_max_window_bits"
-)
-
-var (
-	ExtensionNameBytes = []byte(ExtensionName)
-
-	serverNoContextTakeoverBytes = []byte(serverNoContextTakeover)
-	clientNoContextTakeoverBytes = []byte(clientNoContextTakeover)
-	serverMaxWindowBitsBytes     = []byte(serverMaxWindowBits)
-	clientMaxWindowBitsBytes     = []byte(clientMaxWindowBits)
-)
-
-var windowBits [8][]byte
-
-func init() {
-	for i := range windowBits {
-		windowBits[i] = []byte(strconv.Itoa(i + 8))
-	}
-}
-
-// Parameters contains compression extension options.
-type Parameters struct {
-	ServerNoContextTakeover bool
-	ClientNoContextTakeover bool
-	ServerMaxWindowBits     WindowBits
-	ClientMaxWindowBits     WindowBits
-}
-
-// WindowBits specifies window size accordingly to RFC.
-// Use its Bytes() method to obtain actual size of window in bytes.
-type WindowBits byte
-
-// Defined reports whether window bits were specified.
-func (b WindowBits) Defined() bool {
-	return b > 0
-}
-
-// Bytes returns window size in number of bytes.
-func (b WindowBits) Bytes() int {
-	return 1 << uint(b)
-}
-
-const (
-	MaxLZ77WindowSize = 32768 // 2^15
-)
-
-// Parse reads parameters from given HTTP header option accordingly to RFC.
-//
-// It returns non-nil error at least in these cases:
-//   - The negotiation offer contains an extension parameter not defined for
-//     use in an offer/response.
-//   - The negotiation offer/response contains an extension parameter with an
-//     invalid value.
-//   - The negotiation offer/response contains multiple extension parameters
-//     with the same name.
-func (p *Parameters) Parse(opt httphead.Option) (err error) {
-	const (
-		clientMaxWindowBitsSeen = 1 << iota
-		serverMaxWindowBitsSeen
-		clientNoContextTakeoverSeen
-		serverNoContextTakeoverSeen
-	)
-
-	// Reset to not mix parsed data from previous Parse() calls.
-	*p = Parameters{}
-
-	var seen byte
-	opt.Parameters.ForEach(func(key, val []byte) (ok bool) {
-		switch string(key) {
-		case clientMaxWindowBits:
-			if len(val) == 0 {
-				p.ClientMaxWindowBits = 1
-				return true
-			}
-			if seen&clientMaxWindowBitsSeen != 0 {
-				err = paramError("duplicate", key, val)
-				return false
-			}
-			seen |= clientMaxWindowBitsSeen
-			if p.ClientMaxWindowBits, ok = bitsFromASCII(val); !ok {
-				err = paramError("invalid", key, val)
-				return false
-			}
-
-		case serverMaxWindowBits:
-			if len(val) == 0 {
-				err = paramError("invalid", key, val)
-				return false
-			}
-			if seen&serverMaxWindowBitsSeen != 0 {
-				err = paramError("duplicate", key, val)
-				return false
-			}
-			seen |= serverMaxWindowBitsSeen
-			if p.ServerMaxWindowBits, ok = bitsFromASCII(val); !ok {
-				err = paramError("invalid", key, val)
-				return false
-			}
-
-		case clientNoContextTakeover:
-			if len(val) > 0 {
-				err = paramError("invalid", key, val)
-				return false
-			}
-			if seen&clientNoContextTakeoverSeen != 0 {
-				err = paramError("duplicate", key, val)
-				return false
-			}
-			seen |= clientNoContextTakeoverSeen
-			p.ClientNoContextTakeover = true
-
-		case serverNoContextTakeover:
-			if len(val) > 0 {
-				err = paramError("invalid", key, val)
-				return false
-			}
-			if seen&serverNoContextTakeoverSeen != 0 {
-				err = paramError("duplicate", key, val)
-				return false
-			}
-			seen |= serverNoContextTakeoverSeen
-			p.ServerNoContextTakeover = true
-
-		default:
-			err = paramError("unexpected", key, val)
-			return false
-		}
-		return true
-	})
-	return err
-}
-
-// Option encodes parameters into HTTP header option.
-func (p Parameters) Option() httphead.Option {
-	opt := httphead.Option{
-		Name: ExtensionNameBytes,
-	}
-	setBool(&opt, serverNoContextTakeoverBytes, p.ServerNoContextTakeover)
-	setBool(&opt, clientNoContextTakeoverBytes, p.ClientNoContextTakeover)
-	setBits(&opt, serverMaxWindowBitsBytes, p.ServerMaxWindowBits)
-	setBits(&opt, clientMaxWindowBitsBytes, p.ClientMaxWindowBits)
-	return opt
-}
-
-func isValidBits(x int) bool {
-	return 8 <= x && x <= 15
-}
-
-func bitsFromASCII(p []byte) (WindowBits, bool) {
-	n, ok := httphead.IntFromASCII(p)
-	if !ok || !isValidBits(n) {
-		return 0, false
-	}
-	return WindowBits(n), true
-}
-
-func setBits(opt *httphead.Option, name []byte, bits WindowBits) {
-	if bits == 0 {
-		return
-	}
-	if bits == 1 {
-		opt.Parameters.Set(name, nil)
-		return
-	}
-	if !isValidBits(int(bits)) {
-		panic(fmt.Sprintf("wsflate: invalid bits value: %d", bits))
-	}
-	opt.Parameters.Set(name, windowBits[bits-8])
-}
-
-func setBool(opt *httphead.Option, name []byte, flag bool) {
-	if flag {
-		opt.Parameters.Set(name, nil)
-	}
-}
-
-func paramError(reason string, key, val []byte) error {
-	return fmt.Errorf(
-		"wsflate: %s extension parameter %q: %q",
-		reason, key, val,
-	)
-}
diff --git a/vendor/github.com/gobwas/ws/wsflate/reader.go b/vendor/github.com/gobwas/ws/wsflate/reader.go
deleted file mode 100644
index 8f0f660..0000000
--- a/vendor/github.com/gobwas/ws/wsflate/reader.go
+++ /dev/null
@@ -1,84 +0,0 @@
-package wsflate
-
-import (
-	"io"
-)
-
-// Decompressor is an interface holding deflate decompression implementation.
-type Decompressor interface {
-	io.Reader
-}
-
-// ReadResetter is an optional interface that Decompressor can implement.
-type ReadResetter interface {
-	Reset(io.Reader)
-}
-
-// Reader implements decompression from an io.Reader object using Decompressor.
-// Essentially Reader is a thin wrapper around Decompressor interface to meet
-// PMCE specs.
-//
-// After all data has been written client should call Flush() method.
-// If any error occurs after reading from Reader, all subsequent calls to
-// Read() or Close() will return the error.
-//
-// Reader might be reused for different io.Reader objects after its Reset()
-// method has been called.
-type Reader struct {
-	src  io.Reader
-	ctor func(io.Reader) Decompressor
-	d    Decompressor
-	sr   suffixedReader
-	err  error
-}
-
-// NewReader returns a new Reader.
-func NewReader(r io.Reader, ctor func(io.Reader) Decompressor) *Reader {
-	ret := &Reader{
-		src:  r,
-		ctor: ctor,
-		sr: suffixedReader{
-			suffix: compressionReadTail,
-		},
-	}
-	ret.Reset(r)
-	return ret
-}
-
-// Reset resets Reader to decompress data from src.
-func (r *Reader) Reset(src io.Reader) {
-	r.err = nil
-	r.src = src
-	r.sr.reset(src)
-
-	if x, ok := r.d.(ReadResetter); ok {
-		x.Reset(r.sr.iface())
-	} else {
-		r.d = r.ctor(r.sr.iface())
-	}
-}
-
-// Read implements io.Reader.
-func (r *Reader) Read(p []byte) (n int, err error) {
-	if r.err != nil {
-		return 0, r.err
-	}
-	return r.d.Read(p)
-}
-
-// Close closes Reader and a Decompressor instance used under the hood (if it
-// implements io.Closer interface).
-func (r *Reader) Close() error {
-	if r.err != nil {
-		return r.err
-	}
-	if c, ok := r.d.(io.Closer); ok {
-		r.err = c.Close()
-	}
-	return r.err
-}
-
-// Err returns an error happened during any operation.
-func (r *Reader) Err() error {
-	return r.err
-}
diff --git a/vendor/github.com/gobwas/ws/wsflate/writer.go b/vendor/github.com/gobwas/ws/wsflate/writer.go
deleted file mode 100644
index 0342ccb..0000000
--- a/vendor/github.com/gobwas/ws/wsflate/writer.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package wsflate
-
-import (
-	"fmt"
-	"io"
-)
-
-var (
-	compressionTail = [4]byte{
-		0, 0, 0xff, 0xff,
-	}
-	compressionReadTail = [9]byte{
-		0, 0, 0xff, 0xff,
-		1,
-		0, 0, 0xff, 0xff,
-	}
-)
-
-// Compressor is an interface holding deflate compression implementation.
-type Compressor interface {
-	io.Writer
-	Flush() error
-}
-
-// WriteResetter is an optional interface that Compressor can implement.
-type WriteResetter interface {
-	Reset(io.Writer)
-}
-
-// Writer implements compression for an io.Writer object using Compressor.
-// Essentially Writer is a thin wrapper around Compressor interface to meet
-// PMCE specs.
-//
-// After all data has been written client should call Flush() method.
-// If any error occurs after writing to or flushing a Writer, all subsequent
-// calls to Write(), Flush() or Close() will return the error.
-//
-// Writer might be reused for different io.Writer objects after its Reset()
-// method has been called.
-type Writer struct {
-	// NOTE: Writer uses compressor constructor function instead of field to
-	// reach these goals:
-	// 	1. To shrink Compressor interface and make it easier to be implemented.
-	//	2. If used as a field (and argument to the NewWriter()), Compressor object
-	//	will probably be initialized twice - first time to pass into Writer, and
-	//	second time during Writer initialization (which does Reset() internally).
-	// 	3. To get rid of wrappers if Reset() would be a part of	Compressor.
-	// 	E.g. non conformant implementations would have to provide it somehow,
-	// 	probably making a wrapper with the same constructor function.
-	// 	4. To make Reader and Writer API the same. That is, there is no Reset()
-	// 	method for flate.Reader already, so we need to provide it as a wrapper
-	// 	(see point #3), or drop the Reader.Reset() method.
-	dest io.Writer
-	ctor func(io.Writer) Compressor
-	c    Compressor
-	cbuf cbuf
-	err  error
-}
-
-// NewWriter returns a new Writer.
-func NewWriter(w io.Writer, ctor func(io.Writer) Compressor) *Writer {
-	// NOTE: NewWriter() is chosen against structure with exported fields here
-	// due its Reset() method, which in case of structure, would change
-	// exported field.
-	ret := &Writer{
-		dest: w,
-		ctor: ctor,
-	}
-	ret.Reset(w)
-	return ret
-}
-
-// Reset resets Writer to compress data into dest.
-// Any not flushed data will be lost.
-func (w *Writer) Reset(dest io.Writer) {
-	w.err = nil
-	w.cbuf.reset(dest)
-	if x, ok := w.c.(WriteResetter); ok {
-		x.Reset(&w.cbuf)
-	} else {
-		w.c = w.ctor(&w.cbuf)
-	}
-}
-
-// Write implements io.Writer.
-func (w *Writer) Write(p []byte) (n int, err error) {
-	if w.err != nil {
-		return 0, w.err
-	}
-	n, w.err = w.c.Write(p)
-	return n, w.err
-}
-
-// Flush writes any pending data into w.Dest.
-func (w *Writer) Flush() error {
-	if w.err != nil {
-		return w.err
-	}
-	w.err = w.c.Flush()
-	w.checkTail()
-	return w.err
-}
-
-// Close closes Writer and a Compressor instance used under the hood (if it
-// implements io.Closer interface).
-func (w *Writer) Close() error {
-	if w.err != nil {
-		return w.err
-	}
-	if c, ok := w.c.(io.Closer); ok {
-		w.err = c.Close()
-	}
-	w.checkTail()
-	return w.err
-}
-
-// Err returns an error happened during any operation.
-func (w *Writer) Err() error {
-	return w.err
-}
-
-func (w *Writer) checkTail() {
-	if w.err == nil && w.cbuf.buf != compressionTail {
-		w.err = fmt.Errorf(
-			"wsflate: bad compressor: unexpected stream tail: %#x vs %#x",
-			w.cbuf.buf, compressionTail,
-		)
-	}
-}
diff --git a/vendor/github.com/gobwas/ws/wsutil/cipher.go b/vendor/github.com/gobwas/ws/wsutil/cipher.go
deleted file mode 100644
index bc25064..0000000
--- a/vendor/github.com/gobwas/ws/wsutil/cipher.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package wsutil
-
-import (
-	"io"
-
-	"github.com/gobwas/pool/pbytes"
-	"github.com/gobwas/ws"
-)
-
-// CipherReader implements io.Reader that applies xor-cipher to the bytes read
-// from source.
-// It could help to unmask WebSocket frame payload on the fly.
-type CipherReader struct {
-	r    io.Reader
-	mask [4]byte
-	pos  int
-}
-
-// NewCipherReader creates xor-cipher reader from r with given mask.
-func NewCipherReader(r io.Reader, mask [4]byte) *CipherReader {
-	return &CipherReader{r, mask, 0}
-}
-
-// Reset resets CipherReader to read from r with given mask.
-func (c *CipherReader) Reset(r io.Reader, mask [4]byte) {
-	c.r = r
-	c.mask = mask
-	c.pos = 0
-}
-
-// Read implements io.Reader interface. It applies mask given during
-// initialization to every read byte.
-func (c *CipherReader) Read(p []byte) (n int, err error) {
-	n, err = c.r.Read(p)
-	ws.Cipher(p[:n], c.mask, c.pos)
-	c.pos += n
-	return n, err
-}
-
-// CipherWriter implements io.Writer that applies xor-cipher to the bytes
-// written to the destination writer. It does not modify the original bytes.
-type CipherWriter struct {
-	w    io.Writer
-	mask [4]byte
-	pos  int
-}
-
-// NewCipherWriter creates xor-cipher writer to w with given mask.
-func NewCipherWriter(w io.Writer, mask [4]byte) *CipherWriter {
-	return &CipherWriter{w, mask, 0}
-}
-
-// Reset reset CipherWriter to write to w with given mask.
-func (c *CipherWriter) Reset(w io.Writer, mask [4]byte) {
-	c.w = w
-	c.mask = mask
-	c.pos = 0
-}
-
-// Write implements io.Writer interface. It applies masking during
-// initialization to every sent byte. It does not modify original slice.
-func (c *CipherWriter) Write(p []byte) (n int, err error) {
-	cp := pbytes.GetLen(len(p))
-	defer pbytes.Put(cp)
-
-	copy(cp, p)
-	ws.Cipher(cp, c.mask, c.pos)
-	n, err = c.w.Write(cp)
-	c.pos += n
-
-	return n, err
-}
diff --git a/vendor/github.com/gobwas/ws/wsutil/dialer.go b/vendor/github.com/gobwas/ws/wsutil/dialer.go
deleted file mode 100644
index 4f8788f..0000000
--- a/vendor/github.com/gobwas/ws/wsutil/dialer.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package wsutil
-
-import (
-	"bufio"
-	"bytes"
-	"context"
-	"io"
-	"io/ioutil"
-	"net"
-	"net/http"
-
-	"github.com/gobwas/ws"
-)
-
-// DebugDialer is a wrapper around ws.Dialer. It tracks i/o of WebSocket
-// handshake. That is, it gives ability to receive copied HTTP request and
-// response bytes that made inside Dialer.Dial().
-//
-// Note that it must not be used in production applications that requires
-// Dial() to be efficient.
-type DebugDialer struct {
-	// Dialer contains WebSocket connection establishment options.
-	Dialer ws.Dialer
-
-	// OnRequest and OnResponse are the callbacks that will be called with the
-	// HTTP request and response respectively.
-	OnRequest, OnResponse func([]byte)
-}
-
-// Dial connects to the url host and upgrades connection to WebSocket. It makes
-// it by calling d.Dialer.Dial().
-func (d *DebugDialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *bufio.Reader, hs ws.Handshake, err error) {
-	// Need to copy Dialer to prevent original object mutation.
-	dialer := d.Dialer
-	var (
-		reqBuf bytes.Buffer
-		resBuf bytes.Buffer
-
-		resContentLength int64
-	)
-	userWrap := dialer.WrapConn
-	dialer.WrapConn = func(c net.Conn) net.Conn {
-		if userWrap != nil {
-			c = userWrap(c)
-		}
-
-		// Save the pointer to the raw connection.
-		conn = c
-
-		var (
-			r io.Reader = conn
-			w io.Writer = conn
-		)
-		if d.OnResponse != nil {
-			r = &prefetchResponseReader{
-				source:        conn,
-				buffer:        &resBuf,
-				contentLength: &resContentLength,
-			}
-		}
-		if d.OnRequest != nil {
-			w = io.MultiWriter(conn, &reqBuf)
-		}
-		return rwConn{conn, r, w}
-	}
-
-	_, br, hs, err = dialer.Dial(ctx, urlstr)
-
-	if onRequest := d.OnRequest; onRequest != nil {
-		onRequest(reqBuf.Bytes())
-	}
-	if onResponse := d.OnResponse; onResponse != nil {
-		// We must split response inside buffered bytes from other received
-		// bytes from server.
-		p := resBuf.Bytes()
-		n := bytes.Index(p, headEnd)
-		h := n + len(headEnd)         // Head end index.
-		n = h + int(resContentLength) // Body end index.
-
-		onResponse(p[:n])
-
-		if br != nil {
-			// If br is non-nil, then it mean two things. First is that
-			// handshake is OK and server has sent additional bytes – probably
-			// immediate sent frames (or weird but possible response body).
-			// Second, the bad one, is that br buffer's source is now rwConn
-			// instance from above WrapConn call. It is incorrect, so we must
-			// fix it.
-			var r io.Reader = conn
-			if len(p) > h {
-				// Buffer contains more than just HTTP headers bytes.
-				r = io.MultiReader(
-					bytes.NewReader(p[h:]),
-					conn,
-				)
-			}
-			br.Reset(r)
-			// Must make br.Buffered() to be non-zero.
-			br.Peek(len(p[h:]))
-		}
-	}
-
-	return conn, br, hs, err
-}
-
-type rwConn struct {
-	net.Conn
-
-	r io.Reader
-	w io.Writer
-}
-
-func (rwc rwConn) Read(p []byte) (int, error) {
-	return rwc.r.Read(p)
-}
-
-func (rwc rwConn) Write(p []byte) (int, error) {
-	return rwc.w.Write(p)
-}
-
-var headEnd = []byte("\r\n\r\n")
-
-type prefetchResponseReader struct {
-	source io.Reader // Original connection source.
-	reader io.Reader // Wrapped reader used to read from by clients.
-	buffer *bytes.Buffer
-
-	contentLength *int64
-}
-
-func (r *prefetchResponseReader) Read(p []byte) (int, error) {
-	if r.reader == nil {
-		resp, err := http.ReadResponse(bufio.NewReader(
-			io.TeeReader(r.source, r.buffer),
-		), nil)
-		if err == nil {
-			*r.contentLength, _ = io.Copy(ioutil.Discard, resp.Body)
-			resp.Body.Close()
-		}
-		bts := r.buffer.Bytes()
-		r.reader = io.MultiReader(
-			bytes.NewReader(bts),
-			r.source,
-		)
-	}
-	return r.reader.Read(p)
-}
diff --git a/vendor/github.com/gobwas/ws/wsutil/extenstion.go b/vendor/github.com/gobwas/ws/wsutil/extenstion.go
deleted file mode 100644
index 6e1ebbf..0000000
--- a/vendor/github.com/gobwas/ws/wsutil/extenstion.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package wsutil
-
-import "github.com/gobwas/ws"
-
-// RecvExtension is an interface for clearing fragment header RSV bits.
-type RecvExtension interface {
-	UnsetBits(ws.Header) (ws.Header, error)
-}
-
-// RecvExtensionFunc is an adapter to allow the use of ordinary functions as
-// RecvExtension.
-type RecvExtensionFunc func(ws.Header) (ws.Header, error)
-
-// BitsRecv implements RecvExtension.
-func (fn RecvExtensionFunc) UnsetBits(h ws.Header) (ws.Header, error) {
-	return fn(h)
-}
-
-// SendExtension is an interface for setting fragment header RSV bits.
-type SendExtension interface {
-	SetBits(ws.Header) (ws.Header, error)
-}
-
-// SendExtensionFunc is an adapter to allow the use of ordinary functions as
-// SendExtension.
-type SendExtensionFunc func(ws.Header) (ws.Header, error)
-
-// BitsSend implements SendExtension.
-func (fn SendExtensionFunc) SetBits(h ws.Header) (ws.Header, error) {
-	return fn(h)
-}
diff --git a/vendor/github.com/gobwas/ws/wsutil/handler.go b/vendor/github.com/gobwas/ws/wsutil/handler.go
deleted file mode 100644
index 44fd360..0000000
--- a/vendor/github.com/gobwas/ws/wsutil/handler.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package wsutil
-
-import (
-	"errors"
-	"io"
-	"io/ioutil"
-	"strconv"
-
-	"github.com/gobwas/pool/pbytes"
-	"github.com/gobwas/ws"
-)
-
-// ClosedError returned when peer has closed the connection with appropriate
-// code and a textual reason.
-type ClosedError struct {
-	Code   ws.StatusCode
-	Reason string
-}
-
-// Error implements error interface.
-func (err ClosedError) Error() string {
-	return "ws closed: " + strconv.FormatUint(uint64(err.Code), 10) + " " + err.Reason
-}
-
-// ControlHandler contains logic of handling control frames.
-//
-// The intentional way to use it is to read the next frame header from the
-// connection, optionally check its validity via ws.CheckHeader() and if it is
-// not a ws.OpText of ws.OpBinary (or ws.OpContinuation) – pass it to Handle()
-// method.
-//
-// That is, passed header should be checked to get rid of unexpected errors.
-//
-// The Handle() method will read out all control frame payload (if any) and
-// write necessary bytes as a rfc compatible response.
-type ControlHandler struct {
-	Src   io.Reader
-	Dst   io.Writer
-	State ws.State
-
-	// DisableSrcCiphering disables unmasking payload data read from Src.
-	// It is useful when wsutil.Reader is used or when frame payload already
-	// pulled and ciphered out from the connection (and introduced by
-	// bytes.Reader, for example).
-	DisableSrcCiphering bool
-}
-
-// ErrNotControlFrame is returned by ControlHandler to indicate that given
-// header could not be handled.
-var ErrNotControlFrame = errors.New("not a control frame")
-
-// Handle handles control frames regarding to the c.State and writes responses
-// to the c.Dst when needed.
-//
-// It returns ErrNotControlFrame when given header is not of ws.OpClose,
-// ws.OpPing or ws.OpPong operation code.
-func (c ControlHandler) Handle(h ws.Header) error {
-	switch h.OpCode {
-	case ws.OpPing:
-		return c.HandlePing(h)
-	case ws.OpPong:
-		return c.HandlePong(h)
-	case ws.OpClose:
-		return c.HandleClose(h)
-	}
-	return ErrNotControlFrame
-}
-
-// HandlePing handles ping frame and writes specification compatible response
-// to the c.Dst.
-func (c ControlHandler) HandlePing(h ws.Header) error {
-	if h.Length == 0 {
-		// The most common case when ping is empty.
-		// Note that when sending masked frame the mask for empty payload is
-		// just four zero bytes.
-		return ws.WriteHeader(c.Dst, ws.Header{
-			Fin:    true,
-			OpCode: ws.OpPong,
-			Masked: c.State.ClientSide(),
-		})
-	}
-
-	// In other way reply with Pong frame with copied payload.
-	p := pbytes.GetLen(int(h.Length) + ws.HeaderSize(ws.Header{
-		Length: h.Length,
-		Masked: c.State.ClientSide(),
-	}))
-	defer pbytes.Put(p)
-
-	// Deal with ciphering i/o:
-	// Masking key is used to mask the "Payload data" defined in the same
-	// section as frame-payload-data, which includes "Extension data" and
-	// "Application data".
-	//
-	// See https://tools.ietf.org/html/rfc6455#section-5.3
-	//
-	// NOTE: We prefer ControlWriter with preallocated buffer to
-	// ws.WriteHeader because it performs one syscall instead of two.
-	w := NewControlWriterBuffer(c.Dst, c.State, ws.OpPong, p)
-	r := c.Src
-	if c.State.ServerSide() && !c.DisableSrcCiphering {
-		r = NewCipherReader(r, h.Mask)
-	}
-
-	_, err := io.Copy(w, r)
-	if err == nil {
-		err = w.Flush()
-	}
-
-	return err
-}
-
-// HandlePong handles pong frame by discarding it.
-func (c ControlHandler) HandlePong(h ws.Header) error {
-	if h.Length == 0 {
-		return nil
-	}
-
-	buf := pbytes.GetLen(int(h.Length))
-	defer pbytes.Put(buf)
-
-	// Discard pong message according to the RFC6455:
-	// A Pong frame MAY be sent unsolicited. This serves as a
-	// unidirectional heartbeat. A response to an unsolicited Pong frame
-	// is not expected.
-	_, err := io.CopyBuffer(ioutil.Discard, c.Src, buf)
-
-	return err
-}
-
-// HandleClose handles close frame, makes protocol validity checks and writes
-// specification compatible response to the c.Dst.
-func (c ControlHandler) HandleClose(h ws.Header) error {
-	if h.Length == 0 {
-		err := ws.WriteHeader(c.Dst, ws.Header{
-			Fin:    true,
-			OpCode: ws.OpClose,
-			Masked: c.State.ClientSide(),
-		})
-		if err != nil {
-			return err
-		}
-
-		// Due to RFC, we should interpret the code as no status code
-		// received:
-		//   If this Close control frame contains no status code, _The WebSocket
-		//   Connection Close Code_ is considered to be 1005.
-		//
-		// See https://tools.ietf.org/html/rfc6455#section-7.1.5
-		return ClosedError{
-			Code: ws.StatusNoStatusRcvd,
-		}
-	}
-
-	// Prepare bytes both for reading reason and sending response.
-	p := pbytes.GetLen(int(h.Length) + ws.HeaderSize(ws.Header{
-		Length: h.Length,
-		Masked: c.State.ClientSide(),
-	}))
-	defer pbytes.Put(p)
-
-	// Get the subslice to read the frame payload out.
-	subp := p[:h.Length]
-
-	r := c.Src
-	if c.State.ServerSide() && !c.DisableSrcCiphering {
-		r = NewCipherReader(r, h.Mask)
-	}
-	if _, err := io.ReadFull(r, subp); err != nil {
-		return err
-	}
-
-	code, reason := ws.ParseCloseFrameData(subp)
-	if err := ws.CheckCloseFrameData(code, reason); err != nil {
-		// Here we could not use the prepared bytes because there is no
-		// guarantee that it may fit our protocol error closure code and a
-		// reason.
-		c.closeWithProtocolError(err)
-		return err
-	}
-
-	// Deal with ciphering i/o:
-	// Masking key is used to mask the "Payload data" defined in the same
-	// section as frame-payload-data, which includes "Extension data" and
-	// "Application data".
-	//
-	// See https://tools.ietf.org/html/rfc6455#section-5.3
-	//
-	// NOTE: We prefer ControlWriter with preallocated buffer to
-	// ws.WriteHeader because it performs one syscall instead of two.
-	w := NewControlWriterBuffer(c.Dst, c.State, ws.OpClose, p)
-
-	// RFC6455#5.5.1:
-	// If an endpoint receives a Close frame and did not previously
-	// send a Close frame, the endpoint MUST send a Close frame in
-	// response. (When sending a Close frame in response, the endpoint
-	// typically echoes the status code it received.)
-	_, err := w.Write(p[:2])
-	if err != nil {
-		return err
-	}
-	if err := w.Flush(); err != nil {
-		return err
-	}
-	return ClosedError{
-		Code:   code,
-		Reason: reason,
-	}
-}
-
-func (c ControlHandler) closeWithProtocolError(reason error) error {
-	f := ws.NewCloseFrame(ws.NewCloseFrameBody(
-		ws.StatusProtocolError, reason.Error(),
-	))
-	if c.State.ClientSide() {
-		ws.MaskFrameInPlace(f)
-	}
-	return ws.WriteFrame(c.Dst, f)
-}
diff --git a/vendor/github.com/gobwas/ws/wsutil/helper.go b/vendor/github.com/gobwas/ws/wsutil/helper.go
deleted file mode 100644
index 231760b..0000000
--- a/vendor/github.com/gobwas/ws/wsutil/helper.go
+++ /dev/null
@@ -1,279 +0,0 @@
-package wsutil
-
-import (
-	"bytes"
-	"io"
-	"io/ioutil"
-
-	"github.com/gobwas/ws"
-)
-
-// Message represents a message from peer, that could be presented in one or
-// more frames. That is, it contains payload of all message fragments and
-// operation code of initial frame for this message.
-type Message struct {
-	OpCode  ws.OpCode
-	Payload []byte
-}
-
-// ReadMessage is a helper function that reads next message from r. It appends
-// received message(s) to the third argument and returns the result of it and
-// an error if some failure happened. That is, it probably could receive more
-// than one message when peer sending fragmented message in multiple frames and
-// want to send some control frame between fragments. Then returned slice will
-// contain those control frames at first, and then result of gluing fragments.
-//
-// TODO(gobwas): add DefaultReader with buffer size options.
-func ReadMessage(r io.Reader, s ws.State, m []Message) ([]Message, error) {
-	rd := Reader{
-		Source:    r,
-		State:     s,
-		CheckUTF8: true,
-		OnIntermediate: func(hdr ws.Header, src io.Reader) error {
-			bts, err := ioutil.ReadAll(src)
-			if err != nil {
-				return err
-			}
-			m = append(m, Message{hdr.OpCode, bts})
-			return nil
-		},
-	}
-	h, err := rd.NextFrame()
-	if err != nil {
-		return m, err
-	}
-	var p []byte
-	if h.Fin {
-		// No more frames will be read. Use fixed sized buffer to read payload.
-		p = make([]byte, h.Length)
-		// It is not possible to receive io.EOF here because Reader does not
-		// return EOF if frame payload was successfully fetched.
-		// Thus we consistent here with io.Reader behavior.
-		_, err = io.ReadFull(&rd, p)
-	} else {
-		// Frame is fragmented, thus use ioutil.ReadAll behavior.
-		var buf bytes.Buffer
-		_, err = buf.ReadFrom(&rd)
-		p = buf.Bytes()
-	}
-	if err != nil {
-		return m, err
-	}
-	return append(m, Message{h.OpCode, p}), nil
-}
-
-// ReadClientMessage reads next message from r, considering that caller
-// represents server side.
-// It is a shortcut for ReadMessage(r, ws.StateServerSide, m).
-func ReadClientMessage(r io.Reader, m []Message) ([]Message, error) {
-	return ReadMessage(r, ws.StateServerSide, m)
-}
-
-// ReadServerMessage reads next message from r, considering that caller
-// represents client side.
-// It is a shortcut for ReadMessage(r, ws.StateClientSide, m).
-func ReadServerMessage(r io.Reader, m []Message) ([]Message, error) {
-	return ReadMessage(r, ws.StateClientSide, m)
-}
-
-// ReadData is a helper function that reads next data (non-control) message
-// from rw.
-// It takes care on handling all control frames. It will write response on
-// control frames to the write part of rw. It blocks until some data frame
-// will be received.
-//
-// Note this may handle and write control frames into the writer part of a
-// given io.ReadWriter.
-func ReadData(rw io.ReadWriter, s ws.State) ([]byte, ws.OpCode, error) {
-	return readData(rw, s, ws.OpText|ws.OpBinary)
-}
-
-// ReadClientData reads next data message from rw, considering that caller
-// represents server side. It is a shortcut for ReadData(rw, ws.StateServerSide).
-//
-// Note this may handle and write control frames into the writer part of a
-// given io.ReadWriter.
-func ReadClientData(rw io.ReadWriter) ([]byte, ws.OpCode, error) {
-	return ReadData(rw, ws.StateServerSide)
-}
-
-// ReadClientText reads next text message from rw, considering that caller
-// represents server side. It is a shortcut for ReadData(rw, ws.StateServerSide).
-// It discards received binary messages.
-//
-// Note this may handle and write control frames into the writer part of a
-// given io.ReadWriter.
-func ReadClientText(rw io.ReadWriter) ([]byte, error) {
-	p, _, err := readData(rw, ws.StateServerSide, ws.OpText)
-	return p, err
-}
-
-// ReadClientBinary reads next binary message from rw, considering that caller
-// represents server side. It is a shortcut for ReadData(rw, ws.StateServerSide).
-// It discards received text messages.
-//
-// Note this may handle and write control frames into the writer part of a given
-// io.ReadWriter.
-func ReadClientBinary(rw io.ReadWriter) ([]byte, error) {
-	p, _, err := readData(rw, ws.StateServerSide, ws.OpBinary)
-	return p, err
-}
-
-// ReadServerData reads next data message from rw, considering that caller
-// represents client side. It is a shortcut for ReadData(rw, ws.StateClientSide).
-//
-// Note this may handle and write control frames into the writer part of a
-// given io.ReadWriter.
-func ReadServerData(rw io.ReadWriter) ([]byte, ws.OpCode, error) {
-	return ReadData(rw, ws.StateClientSide)
-}
-
-// ReadServerText reads next text message from rw, considering that caller
-// represents client side. It is a shortcut for ReadData(rw, ws.StateClientSide).
-// It discards received binary messages.
-//
-// Note this may handle and write control frames into the writer part of a given
-// io.ReadWriter.
-func ReadServerText(rw io.ReadWriter) ([]byte, error) {
-	p, _, err := readData(rw, ws.StateClientSide, ws.OpText)
-	return p, err
-}
-
-// ReadServerBinary reads next binary message from rw, considering that caller
-// represents client side. It is a shortcut for ReadData(rw, ws.StateClientSide).
-// It discards received text messages.
-//
-// Note this may handle and write control frames into the writer part of a
-// given io.ReadWriter.
-func ReadServerBinary(rw io.ReadWriter) ([]byte, error) {
-	p, _, err := readData(rw, ws.StateClientSide, ws.OpBinary)
-	return p, err
-}
-
-// WriteMessage is a helper function that writes message to the w. It
-// constructs single frame with given operation code and payload.
-// It uses given state to prepare side-dependent things, like cipher
-// payload bytes from client to server. It will not mutate p bytes if
-// cipher must be made.
-//
-// If you want to write message in fragmented frames, use Writer instead.
-func WriteMessage(w io.Writer, s ws.State, op ws.OpCode, p []byte) error {
-	return writeFrame(w, s, op, true, p)
-}
-
-// WriteServerMessage writes message to w, considering that caller
-// represents server side.
-func WriteServerMessage(w io.Writer, op ws.OpCode, p []byte) error {
-	return WriteMessage(w, ws.StateServerSide, op, p)
-}
-
-// WriteServerText is the same as WriteServerMessage with
-// ws.OpText.
-func WriteServerText(w io.Writer, p []byte) error {
-	return WriteServerMessage(w, ws.OpText, p)
-}
-
-// WriteServerBinary is the same as WriteServerMessage with
-// ws.OpBinary.
-func WriteServerBinary(w io.Writer, p []byte) error {
-	return WriteServerMessage(w, ws.OpBinary, p)
-}
-
-// WriteClientMessage writes message to w, considering that caller
-// represents client side.
-func WriteClientMessage(w io.Writer, op ws.OpCode, p []byte) error {
-	return WriteMessage(w, ws.StateClientSide, op, p)
-}
-
-// WriteClientText is the same as WriteClientMessage with
-// ws.OpText.
-func WriteClientText(w io.Writer, p []byte) error {
-	return WriteClientMessage(w, ws.OpText, p)
-}
-
-// WriteClientBinary is the same as WriteClientMessage with
-// ws.OpBinary.
-func WriteClientBinary(w io.Writer, p []byte) error {
-	return WriteClientMessage(w, ws.OpBinary, p)
-}
-
-// HandleClientControlMessage handles control frame from conn and writes
-// response when needed.
-//
-// It considers that caller represents server side.
-func HandleClientControlMessage(conn io.Writer, msg Message) error {
-	return HandleControlMessage(conn, ws.StateServerSide, msg)
-}
-
-// HandleServerControlMessage handles control frame from conn and writes
-// response when needed.
-//
-// It considers that caller represents client side.
-func HandleServerControlMessage(conn io.Writer, msg Message) error {
-	return HandleControlMessage(conn, ws.StateClientSide, msg)
-}
-
-// HandleControlMessage handles message which was read by ReadMessage()
-// functions.
-//
-// That is, it is expected, that payload is already unmasked and frame header
-// were checked by ws.CheckHeader() call.
-func HandleControlMessage(conn io.Writer, state ws.State, msg Message) error {
-	return (ControlHandler{
-		DisableSrcCiphering: true,
-		Src:                 bytes.NewReader(msg.Payload),
-		Dst:                 conn,
-		State:               state,
-	}).Handle(ws.Header{
-		Length: int64(len(msg.Payload)),
-		OpCode: msg.OpCode,
-		Fin:    true,
-		Masked: state.ServerSide(),
-	})
-}
-
-// ControlFrameHandler returns FrameHandlerFunc for handling control frames.
-// For more info see ControlHandler docs.
-func ControlFrameHandler(w io.Writer, state ws.State) FrameHandlerFunc {
-	return func(h ws.Header, r io.Reader) error {
-		return (ControlHandler{
-			DisableSrcCiphering: true,
-			Src:                 r,
-			Dst:                 w,
-			State:               state,
-		}).Handle(h)
-	}
-}
-
-func readData(rw io.ReadWriter, s ws.State, want ws.OpCode) ([]byte, ws.OpCode, error) {
-	controlHandler := ControlFrameHandler(rw, s)
-	rd := Reader{
-		Source:          rw,
-		State:           s,
-		CheckUTF8:       true,
-		SkipHeaderCheck: false,
-		OnIntermediate:  controlHandler,
-	}
-	for {
-		hdr, err := rd.NextFrame()
-		if err != nil {
-			return nil, 0, err
-		}
-		if hdr.OpCode.IsControl() {
-			if err := controlHandler(hdr, &rd); err != nil {
-				return nil, 0, err
-			}
-			continue
-		}
-		if hdr.OpCode&want == 0 {
-			if err := rd.Discard(); err != nil {
-				return nil, 0, err
-			}
-			continue
-		}
-
-		bts, err := ioutil.ReadAll(&rd)
-
-		return bts, hdr.OpCode, err
-	}
-}
diff --git a/vendor/github.com/gobwas/ws/wsutil/reader.go b/vendor/github.com/gobwas/ws/wsutil/reader.go
deleted file mode 100644
index f2710af..0000000
--- a/vendor/github.com/gobwas/ws/wsutil/reader.go
+++ /dev/null
@@ -1,373 +0,0 @@
-package wsutil
-
-import (
-	"encoding/binary"
-	"errors"
-	"io"
-	"io/ioutil"
-
-	"github.com/gobwas/ws"
-)
-
-// ErrNoFrameAdvance means that Reader's Read() method was called without
-// preceding NextFrame() call.
-var ErrNoFrameAdvance = errors.New("no frame advance")
-
-// ErrFrameTooLarge indicates that a message of length higher than
-// MaxFrameSize was being read.
-var ErrFrameTooLarge = errors.New("frame too large")
-
-// FrameHandlerFunc handles parsed frame header and its body represented by
-// io.Reader.
-//
-// Note that reader represents already unmasked body.
-type FrameHandlerFunc func(ws.Header, io.Reader) error
-
-// Reader is a wrapper around source io.Reader which represents WebSocket
-// connection. It contains options for reading messages from source.
-//
-// Reader implements io.Reader, which Read() method reads payload of incoming
-// WebSocket frames. It also takes care on fragmented frames and possibly
-// intermediate control frames between them.
-//
-// Note that Reader's methods are not goroutine safe.
-type Reader struct {
-	Source io.Reader
-	State  ws.State
-
-	// SkipHeaderCheck disables checking header bits to be RFC6455 compliant.
-	SkipHeaderCheck bool
-
-	// CheckUTF8 enables UTF-8 checks for text frames payload. If incoming
-	// bytes are not valid UTF-8 sequence, ErrInvalidUTF8 returned.
-	CheckUTF8 bool
-
-	// Extensions is a list of negotiated extensions for reader Source.
-	// It is used to meet the specs and clear appropriate bits in fragment
-	// header RSV segment.
-	Extensions []RecvExtension
-
-	// MaxFrameSize controls the maximum frame size in bytes
-	// that can be read. A message exceeding that size will return
-	// a ErrFrameTooLarge to the application.
-	//
-	// Not setting this field means there is no limit.
-	MaxFrameSize int64
-
-	OnContinuation FrameHandlerFunc
-	OnIntermediate FrameHandlerFunc
-
-	opCode ws.OpCode                  // Used to store message op code on fragmentation.
-	frame  io.Reader                  // Used to as frame reader.
-	raw    io.LimitedReader           // Used to discard frames without cipher.
-	utf8   UTF8Reader                 // Used to check UTF8 sequences if CheckUTF8 is true.
-	tmp    [ws.MaxHeaderSize - 2]byte // Used for reading headers.
-	cr     *CipherReader              // Used by NextFrame() to unmask frame payload.
-}
-
-// NewReader creates new frame reader that reads from r keeping given state to
-// make some protocol validity checks when it needed.
-func NewReader(r io.Reader, s ws.State) *Reader {
-	return &Reader{
-		Source: r,
-		State:  s,
-	}
-}
-
-// NewClientSideReader is a helper function that calls NewReader with r and
-// ws.StateClientSide.
-func NewClientSideReader(r io.Reader) *Reader {
-	return NewReader(r, ws.StateClientSide)
-}
-
-// NewServerSideReader is a helper function that calls NewReader with r and
-// ws.StateServerSide.
-func NewServerSideReader(r io.Reader) *Reader {
-	return NewReader(r, ws.StateServerSide)
-}
-
-// Read implements io.Reader. It reads the next message payload into p.
-// It takes care on fragmented messages.
-//
-// The error is io.EOF only if all of message bytes were read.
-// If an io.EOF happens during reading some but not all the message bytes
-// Read() returns io.ErrUnexpectedEOF.
-//
-// The error is ErrNoFrameAdvance if no NextFrame() call was made before
-// reading next message bytes.
-func (r *Reader) Read(p []byte) (n int, err error) {
-	if r.frame == nil {
-		if !r.fragmented() {
-			// Every new Read() must be preceded by NextFrame() call.
-			return 0, ErrNoFrameAdvance
-		}
-		// Read next continuation or intermediate control frame.
-		_, err := r.NextFrame()
-		if err != nil {
-			return 0, err
-		}
-		if r.frame == nil {
-			// We handled intermediate control and now got nothing to read.
-			return 0, nil
-		}
-	}
-
-	n, err = r.frame.Read(p)
-	if err != nil && err != io.EOF {
-		return n, err
-	}
-	if err == nil && r.raw.N != 0 {
-		return n, nil
-	}
-
-	// EOF condition (either err is io.EOF or r.raw.N is zero).
-	switch {
-	case r.raw.N != 0:
-		err = io.ErrUnexpectedEOF
-
-	case r.fragmented():
-		err = nil
-		r.resetFragment()
-
-	case r.CheckUTF8 && !r.utf8.Valid():
-		// NOTE: check utf8 only when full message received, since partial
-		// reads may be invalid.
-		n = r.utf8.Accepted()
-		err = ErrInvalidUTF8
-
-	default:
-		r.reset()
-		err = io.EOF
-	}
-
-	return n, err
-}
-
-// Discard discards current message unread bytes.
-// It discards all frames of fragmented message.
-func (r *Reader) Discard() (err error) {
-	for {
-		_, err = io.Copy(ioutil.Discard, &r.raw)
-		if err != nil {
-			break
-		}
-		if !r.fragmented() {
-			break
-		}
-		if _, err = r.NextFrame(); err != nil {
-			break
-		}
-	}
-	r.reset()
-	return err
-}
-
-// NextFrame prepares r to read next message. It returns received frame header
-// and non-nil error on failure.
-//
-// Note that next NextFrame() call must be done after receiving or discarding
-// all current message bytes.
-func (r *Reader) NextFrame() (hdr ws.Header, err error) {
-	hdr, err = r.readHeader(r.Source)
-	if err == io.EOF && r.fragmented() {
-		// If we are in fragmented state EOF means that is was totally
-		// unexpected.
-		//
-		// NOTE: This is necessary to prevent callers such that
-		// ioutil.ReadAll to receive some amount of bytes without an error.
-		// ReadAll() ignores an io.EOF error, thus caller may think that
-		// whole message fetched, but actually only part of it.
-		err = io.ErrUnexpectedEOF
-	}
-	if err == nil && !r.SkipHeaderCheck {
-		err = ws.CheckHeader(hdr, r.State)
-	}
-	if err != nil {
-		return hdr, err
-	}
-
-	if n := r.MaxFrameSize; n > 0 && hdr.Length > n {
-		return hdr, ErrFrameTooLarge
-	}
-
-	// Save raw reader to use it on discarding frame without ciphering and
-	// other streaming checks.
-	r.raw = io.LimitedReader{
-		R: r.Source,
-		N: hdr.Length,
-	}
-
-	frame := io.Reader(&r.raw)
-	if hdr.Masked {
-		if r.cr == nil {
-			r.cr = NewCipherReader(frame, hdr.Mask)
-		} else {
-			r.cr.Reset(frame, hdr.Mask)
-		}
-		frame = r.cr
-	}
-
-	for _, x := range r.Extensions {
-		hdr, err = x.UnsetBits(hdr)
-		if err != nil {
-			return hdr, err
-		}
-	}
-
-	if r.fragmented() {
-		if hdr.OpCode.IsControl() {
-			if cb := r.OnIntermediate; cb != nil {
-				err = cb(hdr, frame)
-			}
-			if err == nil {
-				// Ensure that src is empty.
-				_, err = io.Copy(ioutil.Discard, &r.raw)
-			}
-			return hdr, err
-		}
-	} else {
-		r.opCode = hdr.OpCode
-	}
-	if r.CheckUTF8 && (hdr.OpCode == ws.OpText || (r.fragmented() && r.opCode == ws.OpText)) {
-		r.utf8.Source = frame
-		frame = &r.utf8
-	}
-
-	// Save reader with ciphering and other streaming checks.
-	r.frame = frame
-
-	if hdr.OpCode == ws.OpContinuation {
-		if cb := r.OnContinuation; cb != nil {
-			err = cb(hdr, frame)
-		}
-	}
-
-	if hdr.Fin {
-		r.State = r.State.Clear(ws.StateFragmented)
-	} else {
-		r.State = r.State.Set(ws.StateFragmented)
-	}
-
-	return hdr, err
-}
-
-func (r *Reader) fragmented() bool {
-	return r.State.Fragmented()
-}
-
-func (r *Reader) resetFragment() {
-	r.raw = io.LimitedReader{}
-	r.frame = nil
-	// Reset source of the UTF8Reader, but not the state.
-	r.utf8.Source = nil
-}
-
-func (r *Reader) reset() {
-	r.raw = io.LimitedReader{}
-	r.frame = nil
-	r.utf8 = UTF8Reader{}
-	r.opCode = 0
-}
-
-// readHeader reads a frame header from in.
-func (r *Reader) readHeader(in io.Reader) (h ws.Header, err error) {
-	// Make slice of bytes with capacity 12 that could hold any header.
-	//
-	// The maximum header size is 14, but due to the 2 hop reads,
-	// after first hop that reads first 2 constant bytes, we could reuse 2 bytes.
-	// So 14 - 2 = 12.
-	bts := r.tmp[:2]
-
-	// Prepare to hold first 2 bytes to choose size of next read.
-	_, err = io.ReadFull(in, bts)
-	if err != nil {
-		return h, err
-	}
-	const bit0 = 0x80
-
-	h.Fin = bts[0]&bit0 != 0
-	h.Rsv = (bts[0] & 0x70) >> 4
-	h.OpCode = ws.OpCode(bts[0] & 0x0f)
-
-	var extra int
-
-	if bts[1]&bit0 != 0 {
-		h.Masked = true
-		extra += 4
-	}
-
-	length := bts[1] & 0x7f
-	switch {
-	case length < 126:
-		h.Length = int64(length)
-
-	case length == 126:
-		extra += 2
-
-	case length == 127:
-		extra += 8
-
-	default:
-		err = ws.ErrHeaderLengthUnexpected
-		return h, err
-	}
-
-	if extra == 0 {
-		return h, err
-	}
-
-	// Increase len of bts to extra bytes need to read.
-	// Overwrite first 2 bytes that was read before.
-	bts = bts[:extra]
-	_, err = io.ReadFull(in, bts)
-	if err != nil {
-		return h, err
-	}
-
-	switch {
-	case length == 126:
-		h.Length = int64(binary.BigEndian.Uint16(bts[:2]))
-		bts = bts[2:]
-
-	case length == 127:
-		if bts[0]&0x80 != 0 {
-			err = ws.ErrHeaderLengthMSB
-			return h, err
-		}
-		h.Length = int64(binary.BigEndian.Uint64(bts[:8]))
-		bts = bts[8:]
-	}
-
-	if h.Masked {
-		copy(h.Mask[:], bts)
-	}
-
-	return h, nil
-}
-
-// NextReader prepares next message read from r. It returns header that
-// describes the message and io.Reader to read message's payload. It returns
-// non-nil error when it is not possible to read message's initial frame.
-//
-// Note that next NextReader() on the same r should be done after reading all
-// bytes from previously returned io.Reader. For more performant way to discard
-// message use Reader and its Discard() method.
-//
-// Note that it will not handle any "intermediate" frames, that possibly could
-// be received between text/binary continuation frames. That is, if peer sent
-// text/binary frame with fin flag "false", then it could send ping frame, and
-// eventually remaining part of text/binary frame with fin "true" – with
-// NextReader() the ping frame will be dropped without any notice. To handle
-// this rare, but possible situation (and if you do not know exactly which
-// frames peer could send), you could use Reader with OnIntermediate field set.
-func NextReader(r io.Reader, s ws.State) (ws.Header, io.Reader, error) {
-	rd := &Reader{
-		Source: r,
-		State:  s,
-	}
-	header, err := rd.NextFrame()
-	if err != nil {
-		return header, nil, err
-	}
-	return header, rd, nil
-}
diff --git a/vendor/github.com/gobwas/ws/wsutil/upgrader.go b/vendor/github.com/gobwas/ws/wsutil/upgrader.go
deleted file mode 100644
index 2ed351e..0000000
--- a/vendor/github.com/gobwas/ws/wsutil/upgrader.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package wsutil
-
-import (
-	"bufio"
-	"bytes"
-	"io"
-	"io/ioutil"
-	"net/http"
-
-	"github.com/gobwas/ws"
-)
-
-// DebugUpgrader is a wrapper around ws.Upgrader. It tracks I/O of a
-// WebSocket handshake.
-//
-// Note that it must not be used in production applications that requires
-// Upgrade() to be efficient.
-type DebugUpgrader struct {
-	// Upgrader contains upgrade to WebSocket options.
-	Upgrader ws.Upgrader
-
-	// OnRequest and OnResponse are the callbacks that will be called with the
-	// HTTP request and response respectively.
-	OnRequest, OnResponse func([]byte)
-}
-
-// Upgrade calls Upgrade() on underlying ws.Upgrader and tracks I/O on conn.
-func (d *DebugUpgrader) Upgrade(conn io.ReadWriter) (hs ws.Handshake, err error) {
-	var (
-		// Take the Reader and Writer parts from conn to be probably replaced
-		// below.
-		r io.Reader = conn
-		w io.Writer = conn
-	)
-	if onRequest := d.OnRequest; onRequest != nil {
-		var buf bytes.Buffer
-		// First, we must read the entire request.
-		req, err := http.ReadRequest(bufio.NewReader(
-			io.TeeReader(conn, &buf),
-		))
-		if err == nil {
-			// Fulfill the buffer with the response body.
-			io.Copy(ioutil.Discard, req.Body)
-			req.Body.Close()
-		}
-		onRequest(buf.Bytes())
-
-		r = io.MultiReader(
-			&buf, conn,
-		)
-	}
-
-	if onResponse := d.OnResponse; onResponse != nil {
-		var buf bytes.Buffer
-		// Intercept the response stream written by the Upgrade().
-		w = io.MultiWriter(
-			conn, &buf,
-		)
-		defer func() {
-			onResponse(buf.Bytes())
-		}()
-	}
-
-	return d.Upgrader.Upgrade(struct {
-		io.Reader
-		io.Writer
-	}{r, w})
-}
diff --git a/vendor/github.com/gobwas/ws/wsutil/utf8.go b/vendor/github.com/gobwas/ws/wsutil/utf8.go
deleted file mode 100644
index b8dc726..0000000
--- a/vendor/github.com/gobwas/ws/wsutil/utf8.go
+++ /dev/null
@@ -1,140 +0,0 @@
-package wsutil
-
-import (
-	"fmt"
-	"io"
-)
-
-// ErrInvalidUTF8 is returned by UTF8 reader on invalid utf8 sequence.
-var ErrInvalidUTF8 = fmt.Errorf("invalid utf8")
-
-// UTF8Reader implements io.Reader that calculates utf8 validity state after
-// every read byte from Source.
-//
-// Note that in some cases client must call r.Valid() after all bytes are read
-// to ensure that all of them are valid utf8 sequences. That is, some io helper
-// functions such io.ReadAtLeast or io.ReadFull could discard the error
-// information returned by the reader when they receive all of requested bytes.
-// For example, the last read sequence is invalid and UTF8Reader returns number
-// of bytes read and an error. But helper function decides to discard received
-// error due to all requested bytes are completely read from the source.
-//
-// Another possible case is when some valid sequence become split by the read
-// bound. Then UTF8Reader can not make decision about validity of the last
-// sequence cause it is not fully read yet. And if the read stops, Valid() will
-// return false, even if Read() by itself dit not.
-type UTF8Reader struct {
-	Source io.Reader
-
-	accepted int
-
-	state uint32
-	codep uint32
-}
-
-// NewUTF8Reader creates utf8 reader that reads from r.
-func NewUTF8Reader(r io.Reader) *UTF8Reader {
-	return &UTF8Reader{
-		Source: r,
-	}
-}
-
-// Reset resets utf8 reader to read from r.
-func (u *UTF8Reader) Reset(r io.Reader) {
-	u.Source = r
-	u.state = 0
-	u.codep = 0
-}
-
-// Read implements io.Reader.
-func (u *UTF8Reader) Read(p []byte) (n int, err error) {
-	n, err = u.Source.Read(p)
-
-	accepted := 0
-	s, c := u.state, u.codep
-	for i := 0; i < n; i++ {
-		c, s = decode(s, c, p[i])
-		if s == utf8Reject {
-			u.state = s
-			return accepted, ErrInvalidUTF8
-		}
-		if s == utf8Accept {
-			accepted = i + 1
-		}
-	}
-	u.state, u.codep = s, c
-	u.accepted = accepted
-
-	return n, err
-}
-
-// Valid checks current reader state. It returns true if all read bytes are
-// valid UTF-8 sequences, and false if not.
-func (u *UTF8Reader) Valid() bool {
-	return u.state == utf8Accept
-}
-
-// Accepted returns number of valid bytes in last Read().
-func (u *UTF8Reader) Accepted() int {
-	return u.accepted
-}
-
-// Below is port of UTF-8 decoder from http://bjoern.hoehrmann.de/utf-8/decoder/dfa/
-//
-// Copyright (c) 2008-2009 Bjoern Hoehrmann <bjoern@hoehrmann.de>
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to
-// deal in the Software without restriction, including without limitation the
-// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-// sell copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-// IN THE SOFTWARE.
-
-const (
-	utf8Accept = 0
-	utf8Reject = 12
-)
-
-var utf8d = [...]byte{
-	// The first part of the table maps bytes to character classes that
-	// to reduce the size of the transition table and create bitmasks.
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-	1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
-	7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-	8, 8, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
-	10, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 3, 3, 11, 6, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-
-	// The second part is a transition table that maps a combination
-	// of a state of the automaton and a character class to a state.
-	0, 12, 24, 36, 60, 96, 84, 12, 12, 12, 48, 72, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
-	12, 0, 12, 12, 12, 12, 12, 0, 12, 0, 12, 12, 12, 24, 12, 12, 12, 12, 12, 24, 12, 24, 12, 12,
-	12, 12, 12, 12, 12, 12, 12, 24, 12, 12, 12, 12, 12, 24, 12, 12, 12, 12, 12, 12, 12, 24, 12, 12,
-	12, 12, 12, 12, 12, 12, 12, 36, 12, 36, 12, 12, 12, 36, 12, 12, 12, 12, 12, 36, 12, 36, 12, 12,
-	12, 36, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
-}
-
-func decode(state, codep uint32, b byte) (uint32, uint32) {
-	t := uint32(utf8d[b])
-
-	if state != utf8Accept {
-		codep = (uint32(b) & 0x3f) | (codep << 6)
-	} else {
-		codep = (0xff >> t) & uint32(b)
-	}
-
-	return codep, uint32(utf8d[256+state+t])
-}
diff --git a/vendor/github.com/gobwas/ws/wsutil/writer.go b/vendor/github.com/gobwas/ws/wsutil/writer.go
deleted file mode 100644
index 6a837cf..0000000
--- a/vendor/github.com/gobwas/ws/wsutil/writer.go
+++ /dev/null
@@ -1,599 +0,0 @@
-package wsutil
-
-import (
-	"fmt"
-	"io"
-
-	"github.com/gobwas/pool"
-	"github.com/gobwas/pool/pbytes"
-	"github.com/gobwas/ws"
-)
-
-// DefaultWriteBuffer contains size of Writer's default buffer. It used by
-// Writer constructor functions.
-var DefaultWriteBuffer = 4096
-
-var (
-	// ErrNotEmpty is returned by Writer.WriteThrough() to indicate that buffer is
-	// not empty and write through could not be done. That is, caller should call
-	// Writer.FlushFragment() to make buffer empty.
-	ErrNotEmpty = fmt.Errorf("writer not empty")
-
-	// ErrControlOverflow is returned by ControlWriter.Write() to indicate that
-	// no more data could be written to the underlying io.Writer because
-	// MaxControlFramePayloadSize limit is reached.
-	ErrControlOverflow = fmt.Errorf("control frame payload overflow")
-)
-
-// Constants which are represent frame length ranges.
-const (
-	len7  = int64(125) // 126 and 127 are reserved values
-	len16 = int64(^uint16(0))
-	len64 = int64((^uint64(0)) >> 1)
-)
-
-// ControlWriter is a wrapper around Writer that contains some guards for
-// buffered writes of control frames.
-type ControlWriter struct {
-	w     *Writer
-	limit int
-	n     int
-}
-
-// NewControlWriter contains ControlWriter with Writer inside whose buffer size
-// is at most ws.MaxControlFramePayloadSize + ws.MaxHeaderSize.
-func NewControlWriter(dest io.Writer, state ws.State, op ws.OpCode) *ControlWriter {
-	return &ControlWriter{
-		w:     NewWriterSize(dest, state, op, ws.MaxControlFramePayloadSize),
-		limit: ws.MaxControlFramePayloadSize,
-	}
-}
-
-// NewControlWriterBuffer returns a new ControlWriter with buf as a buffer.
-//
-// Note that it reserves x bytes of buf for header data, where x could be
-// ws.MinHeaderSize or ws.MinHeaderSize+4 (depending on state). At most
-// (ws.MaxControlFramePayloadSize + x) bytes of buf will be used.
-//
-// It panics if len(buf) <= ws.MinHeaderSize + x.
-func NewControlWriterBuffer(dest io.Writer, state ws.State, op ws.OpCode, buf []byte) *ControlWriter {
-	max := ws.MaxControlFramePayloadSize + headerSize(state, ws.MaxControlFramePayloadSize)
-	if len(buf) > max {
-		buf = buf[:max]
-	}
-
-	w := NewWriterBuffer(dest, state, op, buf)
-
-	return &ControlWriter{
-		w:     w,
-		limit: len(w.buf),
-	}
-}
-
-// Write implements io.Writer. It writes to the underlying Writer until it
-// returns error or until ControlWriter write limit will be exceeded.
-func (c *ControlWriter) Write(p []byte) (n int, err error) {
-	if c.n+len(p) > c.limit {
-		return 0, ErrControlOverflow
-	}
-	return c.w.Write(p)
-}
-
-// Flush flushes all buffered data to the underlying io.Writer.
-func (c *ControlWriter) Flush() error {
-	return c.w.Flush()
-}
-
-var writers = pool.New(128, 65536)
-
-// GetWriter tries to reuse Writer getting it from the pool.
-//
-// This function is intended for memory consumption optimizations, because
-// NewWriter*() functions make allocations for inner buffer.
-//
-// Note the it ceils n to the power of two.
-//
-// If you have your own bytes buffer pool you could use NewWriterBuffer to use
-// pooled bytes in writer.
-func GetWriter(dest io.Writer, state ws.State, op ws.OpCode, n int) *Writer {
-	x, m := writers.Get(n)
-	if x != nil {
-		w := x.(*Writer)
-		w.Reset(dest, state, op)
-		return w
-	}
-	// NOTE: we use m instead of n, because m is an attempt to reuse w of such
-	// size in the future.
-	return NewWriterBufferSize(dest, state, op, m)
-}
-
-// PutWriter puts w for future reuse by GetWriter().
-func PutWriter(w *Writer) {
-	w.Reset(nil, 0, 0)
-	writers.Put(w, w.Size())
-}
-
-// Writer contains logic of buffering output data into a WebSocket fragments.
-// It is much the same as bufio.Writer, except the thing that it works with
-// WebSocket frames, not the raw data.
-//
-// Writer writes frames with specified OpCode.
-// It uses ws.State to decide whether the output frames must be masked.
-//
-// Note that it does not check control frame size or other RFC rules.
-// That is, it must be used with special care to write control frames without
-// violation of RFC. You could use ControlWriter that wraps Writer and contains
-// some guards for writing control frames.
-//
-// If an error occurs writing to a Writer, no more data will be accepted and
-// all subsequent writes will return the error.
-//
-// After all data has been written, the client should call the Flush() method
-// to guarantee all data has been forwarded to the underlying io.Writer.
-type Writer struct {
-	// dest specifies a destination of buffer flushes.
-	dest io.Writer
-
-	// op specifies the WebSocket operation code used in flushed frames.
-	op ws.OpCode
-
-	// state specifies the state of the Writer.
-	state ws.State
-
-	// extensions is a list of negotiated extensions for writer Dest.
-	// It is used to meet the specs and set appropriate bits in fragment
-	// header RSV segment.
-	extensions []SendExtension
-
-	// noFlush reports whether buffer must grow instead of being flushed.
-	noFlush bool
-
-	// Raw representation of the buffer, including reserved header bytes.
-	raw []byte
-
-	// Writeable part of buffer, without reserved header bytes.
-	// Resetting this to nil will not result in reallocation if raw is not nil.
-	// And vice versa: if buf is not nil, then Writer is assumed as ready and
-	// initialized.
-	buf []byte
-
-	// Buffered bytes counter.
-	n int
-
-	dirty bool
-	fseq  int
-	err   error
-}
-
-// NewWriter returns a new Writer whose buffer has the DefaultWriteBuffer size.
-func NewWriter(dest io.Writer, state ws.State, op ws.OpCode) *Writer {
-	return NewWriterBufferSize(dest, state, op, 0)
-}
-
-// NewWriterSize returns a new Writer whose buffer size is at most n + ws.MaxHeaderSize.
-// That is, output frames payload length could be up to n, except the case when
-// Write() is called on empty Writer with len(p) > n.
-//
-// If n <= 0 then the default buffer size is used as Writer's buffer size.
-func NewWriterSize(dest io.Writer, state ws.State, op ws.OpCode, n int) *Writer {
-	if n > 0 {
-		n += headerSize(state, n)
-	}
-	return NewWriterBufferSize(dest, state, op, n)
-}
-
-// NewWriterBufferSize returns a new Writer whose buffer size is equal to n.
-// If n <= ws.MinHeaderSize then the default buffer size is used.
-//
-// Note that Writer will reserve x bytes for header data, where x is in range
-// [ws.MinHeaderSize,ws.MaxHeaderSize]. That is, frames flushed by Writer
-// will not have payload length equal to n, except the case when Write() is
-// called on empty Writer with len(p) > n.
-func NewWriterBufferSize(dest io.Writer, state ws.State, op ws.OpCode, n int) *Writer {
-	if n <= ws.MinHeaderSize {
-		n = DefaultWriteBuffer
-	}
-	return NewWriterBuffer(dest, state, op, make([]byte, n))
-}
-
-// NewWriterBuffer returns a new Writer with buf as a buffer.
-//
-// Note that it reserves x bytes of buf for header data, where x is in range
-// [ws.MinHeaderSize,ws.MaxHeaderSize] (depending on state and buf size).
-//
-// You could use ws.HeaderSize() to calculate number of bytes needed to store
-// header data.
-//
-// It panics if len(buf) is too small to fit header and payload data.
-func NewWriterBuffer(dest io.Writer, state ws.State, op ws.OpCode, buf []byte) *Writer {
-	w := &Writer{
-		dest:  dest,
-		state: state,
-		op:    op,
-		raw:   buf,
-	}
-	w.initBuf()
-	return w
-}
-
-func (w *Writer) initBuf() {
-	offset := reserve(w.state, len(w.raw))
-	if len(w.raw) <= offset {
-		panic("wsutil: writer buffer is too small")
-	}
-	w.buf = w.raw[offset:]
-}
-
-// Reset resets Writer as it was created by New() methods.
-// Note that Reset does reset extensions and other options was set after
-// Writer initialization.
-func (w *Writer) Reset(dest io.Writer, state ws.State, op ws.OpCode) {
-	w.dest = dest
-	w.state = state
-	w.op = op
-
-	w.initBuf()
-
-	w.n = 0
-	w.dirty = false
-	w.fseq = 0
-	w.extensions = w.extensions[:0]
-	w.noFlush = false
-}
-
-// ResetOp is an quick version of Reset().
-// ResetOp does reset unwritten fragments and does not reset results of
-// SetExtensions() or DisableFlush() methods.
-func (w *Writer) ResetOp(op ws.OpCode) {
-	w.op = op
-	w.n = 0
-	w.dirty = false
-	w.fseq = 0
-}
-
-// SetExtensions adds xs as extensions to be used during writes.
-func (w *Writer) SetExtensions(xs ...SendExtension) {
-	w.extensions = xs
-}
-
-// DisableFlush denies Writer to write fragments.
-func (w *Writer) DisableFlush() {
-	w.noFlush = true
-}
-
-// Size returns the size of the underlying buffer in bytes (not including
-// WebSocket header bytes).
-func (w *Writer) Size() int {
-	return len(w.buf)
-}
-
-// Available returns how many bytes are unused in the buffer.
-func (w *Writer) Available() int {
-	return len(w.buf) - w.n
-}
-
-// Buffered returns the number of bytes that have been written into the current
-// buffer.
-func (w *Writer) Buffered() int {
-	return w.n
-}
-
-// Write implements io.Writer.
-//
-// Note that even if the Writer was created to have N-sized buffer, Write()
-// with payload of N bytes will not fit into that buffer. Writer reserves some
-// space to fit WebSocket header data.
-func (w *Writer) Write(p []byte) (n int, err error) {
-	// Even empty p may make a sense.
-	w.dirty = true
-
-	var nn int
-	for len(p) > w.Available() && w.err == nil {
-		if w.noFlush {
-			w.Grow(len(p))
-			continue
-		}
-		if w.Buffered() == 0 {
-			// Large write, empty buffer. Write directly from p to avoid copy.
-			// Trade off here is that we make additional Write() to underlying
-			// io.Writer when writing frame header.
-			//
-			// On large buffers additional write is better than copying.
-			nn, _ = w.WriteThrough(p)
-		} else {
-			nn = copy(w.buf[w.n:], p)
-			w.n += nn
-			w.FlushFragment()
-		}
-		n += nn
-		p = p[nn:]
-	}
-	if w.err != nil {
-		return n, w.err
-	}
-	nn = copy(w.buf[w.n:], p)
-	w.n += nn
-	n += nn
-
-	// Even if w.Available() == 0 we will not flush buffer preventively because
-	// this could bring unwanted fragmentation. That is, user could create
-	// buffer with size that fits exactly all further Write() call, and then
-	// call Flush(), excepting that single and not fragmented frame will be
-	// sent. With preemptive flush this case will produce two frames – last one
-	// will be empty and just to set fin = true.
-
-	return n, w.err
-}
-
-func ceilPowerOfTwo(n int) int {
-	n |= n >> 1
-	n |= n >> 2
-	n |= n >> 4
-	n |= n >> 8
-	n |= n >> 16
-	n |= n >> 32
-	n++
-	return n
-}
-
-// Grow grows Writer's internal buffer capacity to guarantee space for another
-// n bytes of _payload_ -- that is, frame header is not included in n.
-func (w *Writer) Grow(n int) {
-	// NOTE: we must respect the possibility of header reserved bytes grow.
-	var (
-		size       = len(w.raw)
-		prevOffset = len(w.raw) - len(w.buf)
-		nextOffset = len(w.raw) - len(w.buf)
-		buffered   = w.Buffered()
-	)
-	for cap := size - nextOffset - buffered; cap < n; {
-		// This loop runs twice only at split cases, when reservation of raw
-		// buffer space for the header shrinks capacity of new buffer such that
-		// it still less than n.
-		//
-		// Loop is safe here because:
-		// - (offset + buffered + n) is greater than size, otherwise (cap < n)
-		//   would be false:
-		//   size  = offset + buffered + freeSpace (cap)
-		//   size' = offset + buffered + wantSpace (n)
-		//   Since (cap < n) is true in the loop condition, size' is guaranteed
-		//   to be greater => no infinite loop.
-		size = ceilPowerOfTwo(nextOffset + buffered + n)
-		nextOffset = reserve(w.state, size)
-		cap = size - nextOffset - buffered
-	}
-	if size < len(w.raw) {
-		panic("wsutil: buffer grow leads to its reduce")
-	}
-	if size == len(w.raw) {
-		return
-	}
-	p := make([]byte, size)
-	copy(p[nextOffset-prevOffset:], w.raw[:prevOffset+buffered])
-	w.raw = p
-	w.buf = w.raw[nextOffset:]
-}
-
-// WriteThrough writes data bypassing the buffer.
-// Note that Writer's buffer must be empty before calling WriteThrough().
-func (w *Writer) WriteThrough(p []byte) (n int, err error) {
-	if w.err != nil {
-		return 0, w.err
-	}
-	if w.Buffered() != 0 {
-		return 0, ErrNotEmpty
-	}
-
-	var frame ws.Frame
-	frame.Header = ws.Header{
-		OpCode: w.opCode(),
-		Fin:    false,
-		Length: int64(len(p)),
-	}
-	for _, x := range w.extensions {
-		frame.Header, err = x.SetBits(frame.Header)
-		if err != nil {
-			return 0, err
-		}
-	}
-	if w.state.ClientSide() {
-		// Should copy bytes to prevent corruption of caller data.
-		payload := pbytes.GetLen(len(p))
-		defer pbytes.Put(payload)
-		copy(payload, p)
-
-		frame.Payload = payload
-		frame = ws.MaskFrameInPlace(frame)
-	} else {
-		frame.Payload = p
-	}
-
-	w.err = ws.WriteFrame(w.dest, frame)
-	if w.err == nil {
-		n = len(p)
-	}
-
-	w.dirty = true
-	w.fseq++
-
-	return n, w.err
-}
-
-// ReadFrom implements io.ReaderFrom.
-func (w *Writer) ReadFrom(src io.Reader) (n int64, err error) {
-	var nn int
-	for err == nil {
-		if w.Available() == 0 {
-			if w.noFlush {
-				w.Grow(w.Buffered()) // Twice bigger.
-			} else {
-				err = w.FlushFragment()
-			}
-			continue
-		}
-
-		// We copy the behavior of bufio.Writer here.
-		// Also, from the docs on io.ReaderFrom:
-		//   ReadFrom reads data from r until EOF or error.
-		//
-		// See https://codereview.appspot.com/76400048/#ps1
-		const maxEmptyReads = 100
-		var nr int
-		for nr < maxEmptyReads {
-			nn, err = src.Read(w.buf[w.n:])
-			if nn != 0 || err != nil {
-				break
-			}
-			nr++
-		}
-		if nr == maxEmptyReads {
-			return n, io.ErrNoProgress
-		}
-
-		w.n += nn
-		n += int64(nn)
-	}
-	if err == io.EOF {
-		// NOTE: Do not flush preemptively.
-		// See the Write() sources for more info.
-		err = nil
-		w.dirty = true
-	}
-	return n, err
-}
-
-// Flush writes any buffered data to the underlying io.Writer.
-// It sends the frame with "fin" flag set to true.
-//
-// If no Write() or ReadFrom() was made, then Flush() does nothing.
-func (w *Writer) Flush() error {
-	if (!w.dirty && w.Buffered() == 0) || w.err != nil {
-		return w.err
-	}
-
-	w.err = w.flushFragment(true)
-	w.n = 0
-	w.dirty = false
-	w.fseq = 0
-
-	return w.err
-}
-
-// FlushFragment writes any buffered data to the underlying io.Writer.
-// It sends the frame with "fin" flag set to false.
-func (w *Writer) FlushFragment() error {
-	if w.Buffered() == 0 || w.err != nil {
-		return w.err
-	}
-
-	w.err = w.flushFragment(false)
-	w.n = 0
-	w.fseq++
-
-	return w.err
-}
-
-func (w *Writer) flushFragment(fin bool) (err error) {
-	var (
-		payload = w.buf[:w.n]
-		header  = ws.Header{
-			OpCode: w.opCode(),
-			Fin:    fin,
-			Length: int64(len(payload)),
-		}
-	)
-	for _, ext := range w.extensions {
-		header, err = ext.SetBits(header)
-		if err != nil {
-			return err
-		}
-	}
-	if w.state.ClientSide() {
-		header.Masked = true
-		header.Mask = ws.NewMask()
-		ws.Cipher(payload, header.Mask, 0)
-	}
-	// Write header to the header segment of the raw buffer.
-	var (
-		offset = len(w.raw) - len(w.buf)
-		skip   = offset - ws.HeaderSize(header)
-	)
-	buf := bytesWriter{
-		buf: w.raw[skip:offset],
-	}
-	if err := ws.WriteHeader(&buf, header); err != nil {
-		// Must never be reached.
-		panic("dump header error: " + err.Error())
-	}
-	_, err = w.dest.Write(w.raw[skip : offset+w.n])
-	return err
-}
-
-func (w *Writer) opCode() ws.OpCode {
-	if w.fseq > 0 {
-		return ws.OpContinuation
-	}
-	return w.op
-}
-
-var errNoSpace = fmt.Errorf("not enough buffer space")
-
-type bytesWriter struct {
-	buf []byte
-	pos int
-}
-
-func (w *bytesWriter) Write(p []byte) (int, error) {
-	n := copy(w.buf[w.pos:], p)
-	w.pos += n
-	if n != len(p) {
-		return n, errNoSpace
-	}
-	return n, nil
-}
-
-func writeFrame(w io.Writer, s ws.State, op ws.OpCode, fin bool, p []byte) error {
-	var frame ws.Frame
-	if s.ClientSide() {
-		// Should copy bytes to prevent corruption of caller data.
-		payload := pbytes.GetLen(len(p))
-		defer pbytes.Put(payload)
-
-		copy(payload, p)
-
-		frame = ws.NewFrame(op, fin, payload)
-		frame = ws.MaskFrameInPlace(frame)
-	} else {
-		frame = ws.NewFrame(op, fin, p)
-	}
-
-	return ws.WriteFrame(w, frame)
-}
-
-// reserve calculates number of bytes need to be reserved for frame header.
-//
-// Note that instead of ws.HeaderSize() it does calculation based on the buffer
-// size, not the payload size.
-func reserve(state ws.State, n int) (offset int) {
-	var mask int
-	if state.ClientSide() {
-		mask = 4
-	}
-	switch {
-	case n <= int(len7)+mask+2:
-		return mask + 2
-	case n <= int(len16)+mask+4:
-		return mask + 4
-	default:
-		return mask + 10
-	}
-}
-
-// headerSize returns number of bytes needed to encode header of a frame with
-// given state and length.
-func headerSize(s ws.State, n int) int {
-	return ws.HeaderSize(ws.Header{
-		Length: int64(n),
-		Masked: s.ClientSide(),
-	})
-}
diff --git a/vendor/github.com/gobwas/ws/wsutil/wsutil.go b/vendor/github.com/gobwas/ws/wsutil/wsutil.go
deleted file mode 100644
index 86211f3..0000000
--- a/vendor/github.com/gobwas/ws/wsutil/wsutil.go
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
-Package wsutil provides utilities for working with WebSocket protocol.
-
-Overview:
-
-	// Read masked text message from peer and check utf8 encoding.
-	header, err := ws.ReadHeader(conn)
-	if err != nil {
-		// handle err
-	}
-
-	// Prepare to read payload.
-	r := io.LimitReader(conn, header.Length)
-	r = wsutil.NewCipherReader(r, header.Mask)
-	r = wsutil.NewUTF8Reader(r)
-
-	payload, err := ioutil.ReadAll(r)
-	if err != nil {
-		// handle err
-	}
-
-You could get the same behavior using just `wsutil.Reader`:
-
-	r := wsutil.Reader{
-		Source:    conn,
-		CheckUTF8: true,
-	}
-
-	payload, err := ioutil.ReadAll(r)
-	if err != nil {
-		// handle err
-	}
-
-Or even simplest:
-
-	payload, err := wsutil.ReadClientText(conn)
-	if err != nil {
-		// handle err
-	}
-
-Package is also exports tools for buffered writing:
-
-	// Create buffered writer, that will buffer output bytes and send them as
-	// 128-length fragments (with exception on large writes, see the doc).
-	writer := wsutil.NewWriterSize(conn, ws.StateServerSide, ws.OpText, 128)
-
-	_, err := io.CopyN(writer, rand.Reader, 100)
-	if err == nil {
-		err = writer.Flush()
-	}
-	if err != nil {
-		// handle error
-	}
-
-For more utils and helpers see the documentation.
-*/
-package wsutil
diff --git a/vendor/github.com/nbd-wtf/go-nostr/.gitignore b/vendor/github.com/nbd-wtf/go-nostr/.gitignore
index 6c36ead..b1195ff 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/.gitignore
+++ b/vendor/github.com/nbd-wtf/go-nostr/.gitignore
@@ -1 +1,3 @@
 go-nostr
+libsecp256k1
+knowledge.md
diff --git a/vendor/github.com/nbd-wtf/go-nostr/README.md b/vendor/github.com/nbd-wtf/go-nostr/README.md
index 474c2cf..96ec4a1 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/README.md
+++ b/vendor/github.com/nbd-wtf/go-nostr/README.md
@@ -145,6 +145,14 @@ But to use it you need the host to have it installed as a shared library and CGO
 
 To use it, use `-tags=libsecp256k1` whenever you're compiling your program that uses this library.
 
+### Test for Wasm
+
+Install [wasmbrowsertest](https://github.com/agnivade/wasmbrowsertest), then run tests:
+
+```sh
+GOOS=js GOARCH=wasm go test -short ./...
+```
+
 ## Warning: risk of goroutine bloat (if used incorrectly)
 
 Remember to cancel subscriptions, either by calling `.Unsub()` on them or ensuring their `context.Context` will be canceled at some point.
diff --git a/vendor/github.com/nbd-wtf/go-nostr/connection.go b/vendor/github.com/nbd-wtf/go-nostr/connection.go
index da1161d..fe60386 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/connection.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/connection.go
@@ -1,181 +1,59 @@
 package nostr
 
 import (
-	"bytes"
-	"compress/flate"
 	"context"
 	"crypto/tls"
 	"errors"
 	"fmt"
 	"io"
-	"net"
 	"net/http"
+	"time"
 
-	"github.com/gobwas/httphead"
-	"github.com/gobwas/ws"
-	"github.com/gobwas/ws/wsflate"
-	"github.com/gobwas/ws/wsutil"
+	ws "github.com/coder/websocket"
 )
 
 type Connection struct {
-	conn              net.Conn
-	enableCompression bool
-	controlHandler    wsutil.FrameHandlerFunc
-	flateReader       *wsflate.Reader
-	reader            *wsutil.Reader
-	flateWriter       *wsflate.Writer
-	writer            *wsutil.Writer
-	msgStateR         *wsflate.MessageState
-	msgStateW         *wsflate.MessageState
+	conn *ws.Conn
 }
 
 func NewConnection(ctx context.Context, url string, requestHeader http.Header, tlsConfig *tls.Config) (*Connection, error) {
-	dialer := ws.Dialer{
-		Header: ws.HandshakeHeaderHTTP(requestHeader),
-		Extensions: []httphead.Option{
-			wsflate.DefaultParameters.Option(),
-		},
-		TLSConfig: tlsConfig,
-	}
-	conn, _, hs, err := dialer.Dial(ctx, url)
+	c, _, err := ws.Dial(ctx, url, getConnectionOptions(requestHeader, tlsConfig))
 	if err != nil {
-		return nil, fmt.Errorf("failed to dial: %w", err)
+		return nil, err
 	}
 
-	enableCompression := false
-	state := ws.StateClientSide
-	for _, extension := range hs.Extensions {
-		if string(extension.Name) == wsflate.ExtensionName {
-			enableCompression = true
-			state |= ws.StateExtended
-			break
-		}
-	}
-
-	// reader
-	var flateReader *wsflate.Reader
-	var msgStateR wsflate.MessageState
-	if enableCompression {
-		msgStateR.SetCompressed(true)
-
-		flateReader = wsflate.NewReader(nil, func(r io.Reader) wsflate.Decompressor {
-			return flate.NewReader(r)
-		})
-	}
-
-	controlHandler := wsutil.ControlFrameHandler(conn, ws.StateClientSide)
-	reader := &wsutil.Reader{
-		Source:         conn,
-		State:          state,
-		OnIntermediate: controlHandler,
-		CheckUTF8:      false,
-		Extensions: []wsutil.RecvExtension{
-			&msgStateR,
-		},
-	}
-
-	// writer
-	var flateWriter *wsflate.Writer
-	var msgStateW wsflate.MessageState
-	if enableCompression {
-		msgStateW.SetCompressed(true)
-
-		flateWriter = wsflate.NewWriter(nil, func(w io.Writer) wsflate.Compressor {
-			fw, err := flate.NewWriter(w, 4)
-			if err != nil {
-				InfoLogger.Printf("Failed to create flate writer: %v", err)
-			}
-			return fw
-		})
-	}
-
-	writer := wsutil.NewWriter(conn, state, ws.OpText)
-	writer.SetExtensions(&msgStateW)
+	c.SetReadLimit(262144) // this should be enough for contact lists of over 2000 people
 
 	return &Connection{
-		conn:              conn,
-		enableCompression: enableCompression,
-		controlHandler:    controlHandler,
-		flateReader:       flateReader,
-		reader:            reader,
-		msgStateR:         &msgStateR,
-		flateWriter:       flateWriter,
-		writer:            writer,
-		msgStateW:         &msgStateW,
+		conn: c,
 	}, nil
 }
 
 func (c *Connection) WriteMessage(ctx context.Context, data []byte) error {
-	select {
-	case <-ctx.Done():
-		return errors.New("context canceled")
-	default:
-	}
-
-	if c.msgStateW.IsCompressed() && c.enableCompression {
-		c.flateWriter.Reset(c.writer)
-		if _, err := io.Copy(c.flateWriter, bytes.NewReader(data)); err != nil {
-			return fmt.Errorf("failed to write message: %w", err)
-		}
-
-		if err := c.flateWriter.Close(); err != nil {
-			return fmt.Errorf("failed to close flate writer: %w", err)
-		}
-	} else {
-		if _, err := io.Copy(c.writer, bytes.NewReader(data)); err != nil {
-			return fmt.Errorf("failed to write message: %w", err)
-		}
-	}
-
-	if err := c.writer.Flush(); err != nil {
-		return fmt.Errorf("failed to flush writer: %w", err)
+	if err := c.conn.Write(ctx, ws.MessageText, data); err != nil {
+		return fmt.Errorf("failed to write message: %w", err)
 	}
 
 	return nil
 }
 
 func (c *Connection) ReadMessage(ctx context.Context, buf io.Writer) error {
-	for {
-		select {
-		case <-ctx.Done():
-			return errors.New("context canceled")
-		default:
-		}
-
-		h, err := c.reader.NextFrame()
-		if err != nil {
-			c.conn.Close()
-			return fmt.Errorf("failed to advance frame: %w", err)
-		}
-
-		if h.OpCode.IsControl() {
-			if err := c.controlHandler(h, c.reader); err != nil {
-				return fmt.Errorf("failed to handle control frame: %w", err)
-			}
-		} else if h.OpCode == ws.OpBinary ||
-			h.OpCode == ws.OpText {
-			break
-		}
-
-		if err := c.reader.Discard(); err != nil {
-			return fmt.Errorf("failed to discard: %w", err)
-		}
+	_, reader, err := c.conn.Reader(ctx)
+	if err != nil {
+		return fmt.Errorf("failed to get reader: %w", err)
 	}
-
-	if c.msgStateR.IsCompressed() && c.enableCompression {
-		c.flateReader.Reset(c.reader)
-		if _, err := io.Copy(buf, c.flateReader); err != nil {
-			return fmt.Errorf("failed to read message: %w", err)
-		}
-	} else {
-		if _, err := io.Copy(buf, c.reader); err != nil {
-			return fmt.Errorf("failed to read message: %w", err)
-		}
+	if _, err := io.Copy(buf, reader); err != nil {
+		return fmt.Errorf("failed to read message: %w", err)
 	}
-
 	return nil
 }
 
 func (c *Connection) Close() error {
-	return c.conn.Close()
+	return c.conn.Close(ws.StatusNormalClosure, "")
+}
+
+func (c *Connection) Ping(ctx context.Context) error {
+	ctx, cancel := context.WithTimeoutCause(ctx, time.Millisecond*800, errors.New("ping took too long"))
+	defer cancel()
+	return c.conn.Ping(ctx)
 }
diff --git a/vendor/github.com/nbd-wtf/go-nostr/connection_options.go b/vendor/github.com/nbd-wtf/go-nostr/connection_options.go
new file mode 100644
index 0000000..69a8bf9
--- /dev/null
+++ b/vendor/github.com/nbd-wtf/go-nostr/connection_options.go
@@ -0,0 +1,34 @@
+//go:build !js
+
+package nostr
+
+import (
+	"crypto/tls"
+	"net/http"
+	"net/textproto"
+
+	ws "github.com/coder/websocket"
+)
+
+var defaultConnectionOptions = &ws.DialOptions{
+	CompressionMode: ws.CompressionContextTakeover,
+	HTTPHeader: http.Header{
+		textproto.CanonicalMIMEHeaderKey("User-Agent"): {"github.com/nbd-wtf/go-nostr"},
+	},
+}
+
+func getConnectionOptions(requestHeader http.Header, tlsConfig *tls.Config) *ws.DialOptions {
+	if requestHeader == nil && tlsConfig == nil {
+		return defaultConnectionOptions
+	}
+
+	return &ws.DialOptions{
+		HTTPHeader:      requestHeader,
+		CompressionMode: ws.CompressionContextTakeover,
+		HTTPClient: &http.Client{
+			Transport: &http.Transport{
+				TLSClientConfig: tlsConfig,
+			},
+		},
+	}
+}
diff --git a/vendor/github.com/nbd-wtf/go-nostr/connection_options_js.go b/vendor/github.com/nbd-wtf/go-nostr/connection_options_js.go
new file mode 100644
index 0000000..3e80025
--- /dev/null
+++ b/vendor/github.com/nbd-wtf/go-nostr/connection_options_js.go
@@ -0,0 +1,15 @@
+package nostr
+
+import (
+	"crypto/tls"
+	"net/http"
+
+	ws "github.com/coder/websocket"
+)
+
+var emptyOptions = ws.DialOptions{}
+
+func getConnectionOptions(_ http.Header, _ *tls.Config) *ws.DialOptions {
+	// on javascript we ignore everything because there is nothing else we can do
+	return &emptyOptions
+}
diff --git a/vendor/github.com/nbd-wtf/go-nostr/event.go b/vendor/github.com/nbd-wtf/go-nostr/event.go
index cce165e..2dd9a97 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/event.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/event.go
@@ -16,9 +16,6 @@ type Event struct {
 	Tags      Tags
 	Content   string
 	Sig       string
-
-	// anything here will be mashed together with the main event object when serializing
-	extra map[string]any
 }
 
 // Event Stringer interface, just returns the raw JSON as a string.
diff --git a/vendor/github.com/nbd-wtf/go-nostr/event_easyjson.go b/vendor/github.com/nbd-wtf/go-nostr/event_easyjson.go
index 435583c..8888b9c 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/event_easyjson.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/event_easyjson.go
@@ -22,7 +22,6 @@ func easyjsonF642ad3eDecodeGithubComNbdWtfGoNostr(in *jlexer.Lexer, out *Event)
 		in.Skip()
 		return
 	}
-	out.extra = make(map[string]any)
 	in.Delim('{')
 	for !in.IsDelim('}') {
 		key := in.UnsafeFieldName(true)
@@ -85,8 +84,6 @@ func easyjsonF642ad3eDecodeGithubComNbdWtfGoNostr(in *jlexer.Lexer, out *Event)
 			out.Content = in.String()
 		case "sig":
 			out.Sig = in.String()
-		default:
-			out.extra[key] = in.Interface()
 		}
 		in.WantComma()
 	}
@@ -155,12 +152,6 @@ func easyjsonF642ad3eEncodeGithubComNbdWtfGoNostr(out *jwriter.Writer, in Event)
 			out.String(in.Sig)
 		}
 	}
-	{
-		for key, value := range in.extra {
-			out.RawString(",\"" + key + "\":")
-			out.Raw(json.Marshal(value))
-		}
-	}
 	out.RawByte('}')
 }
 
diff --git a/vendor/github.com/nbd-wtf/go-nostr/event_extra.go b/vendor/github.com/nbd-wtf/go-nostr/event_extra.go
deleted file mode 100644
index 46263b3..0000000
--- a/vendor/github.com/nbd-wtf/go-nostr/event_extra.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package nostr
-
-// Deprecated: this was never a good idea, stop using.
-func (evt *Event) SetExtra(key string, value any) {
-	if evt.extra == nil {
-		evt.extra = make(map[string]any)
-	}
-	evt.extra[key] = value
-}
-
-// Deprecated: this was never a good idea, stop using.
-func (evt *Event) RemoveExtra(key string) {
-	if evt.extra == nil {
-		return
-	}
-	delete(evt.extra, key)
-}
-
-// Deprecated: this was never a good idea, stop using.
-func (evt Event) GetExtra(key string) any {
-	ival, _ := evt.extra[key]
-	return ival
-}
-
-// Deprecated: this was never a good idea, stop using.
-func (evt Event) GetExtraString(key string) string {
-	ival, ok := evt.extra[key]
-	if !ok {
-		return ""
-	}
-	val, ok := ival.(string)
-	if !ok {
-		return ""
-	}
-	return val
-}
-
-// Deprecated: this was never a good idea, stop using.
-func (evt Event) GetExtraNumber(key string) float64 {
-	ival, ok := evt.extra[key]
-	if !ok {
-		return 0
-	}
-
-	switch val := ival.(type) {
-	case float64:
-		return val
-	case int:
-		return float64(val)
-	case int64:
-		return float64(val)
-	}
-
-	return 0
-}
-
-// Deprecated: this was never a good idea, stop using.
-func (evt Event) GetExtraBoolean(key string) bool {
-	ival, ok := evt.extra[key]
-	if !ok {
-		return false
-	}
-	val, ok := ival.(bool)
-	if !ok {
-		return false
-	}
-	return val
-}
diff --git a/vendor/github.com/nbd-wtf/go-nostr/helpers.go b/vendor/github.com/nbd-wtf/go-nostr/helpers.go
index 9217e00..5370827 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/helpers.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/helpers.go
@@ -1,6 +1,7 @@
 package nostr
 
 import (
+	"bytes"
 	"strconv"
 	"strings"
 	"sync"
@@ -117,3 +118,36 @@ func isLowerHex(thing string) bool {
 	}
 	return true
 }
+
+func extractSubID(jsonStr []byte) string {
+	// look for "EVENT" pattern
+	start := bytes.Index(jsonStr, []byte(`"EVENT"`))
+	if start == -1 {
+		return ""
+	}
+
+	// move to the next quote
+	offset := bytes.Index(jsonStr[start+7:], []byte{'"'})
+	start += 7 + offset + 1
+
+	// find the ending quote
+	end := bytes.Index(jsonStr[start:], []byte{'"'})
+
+	// get the contents
+	return string(jsonStr[start : start+end])
+}
+
+func extractEventID(jsonStr []byte) string {
+	// look for "id": pattern
+	start := bytes.Index(jsonStr, []byte(`"id":`))
+	if start == -1 {
+		return ""
+	}
+
+	// move to the next quote
+	offset := bytes.Index(jsonStr[start+4:], []byte{'"'})
+	start += 4 + offset + 1
+
+	// get 64 characters of the id
+	return string(jsonStr[start : start+64])
+}
diff --git a/vendor/github.com/nbd-wtf/go-nostr/justfile b/vendor/github.com/nbd-wtf/go-nostr/justfile
new file mode 100644
index 0000000..5f00bd5
--- /dev/null
+++ b/vendor/github.com/nbd-wtf/go-nostr/justfile
@@ -0,0 +1,23 @@
+list:
+    @just --list
+
+vendor-libsecp256k1:
+    #!/usr/bin/env fish
+    rm -r libsecp256k1
+    mkdir libsecp256k1
+    mkdir libsecp256k1/include
+    mkdir libsecp256k1/src
+    mkdir libsecp256k1/src/asm
+    mkdir libsecp256k1/src/modules
+    mkdir libsecp256k1/src/modules/extrakeys
+    mkdir libsecp256k1/src/modules/schnorrsig
+
+    wget https://api.github.com/repos/bitcoin-core/secp256k1/tarball/v0.6.0 -O libsecp256k1.tar.gz
+    tar -xvf libsecp256k1.tar.gz
+    rm libsecp256k1.tar.gz
+    cd bitcoin-core-secp256k1-*
+    for f in include/secp256k1.h include/secp256k1_ecdh.h include/secp256k1_ellswift.h include/secp256k1_extrakeys.h include/secp256k1_preallocated.h include/secp256k1_recovery.h include/secp256k1_schnorrsig.h src/asm/field_10x26_arm.s src/assumptions.h src/bench.c src/bench.h src/bench_ecmult.c src/bench_internal.c src/checkmem.h src/ecdsa.h src/ecdsa_impl.h src/eckey.h src/eckey_impl.h src/ecmult.h src/ecmult_compute_table.h src/ecmult_compute_table_impl.h src/ecmult_const.h src/ecmult_const_impl.h src/ecmult_gen.h src/ecmult_gen_compute_table.h src/ecmult_gen_compute_table_impl.h src/ecmult_gen_impl.h src/ecmult_impl.h src/field.h src/field_10x26.h src/field_10x26_impl.h src/field_5x52.h src/field_5x52_impl.h src/field_5x52_int128_impl.h src/field_impl.h src/group.h src/group_impl.h src/hash.h src/hash_impl.h src/hsort.h src/hsort_impl.h src/int128.h src/int128_impl.h src/int128_native.h src/int128_native_impl.h src/int128_struct.h src/int128_struct_impl.h src/modinv32.h src/modinv32_impl.h src/modinv64.h src/modinv64_impl.h src/modules/extrakeys/main_impl.h src/modules/schnorrsig/main_impl.h src/precompute_ecmult.c src/precompute_ecmult_gen.c src/precomputed_ecmult.c src/precomputed_ecmult.h src/precomputed_ecmult_gen.c src/precomputed_ecmult_gen.h src/scalar.h src/scalar_4x64.h src/scalar_4x64_impl.h src/scalar_8x32.h src/scalar_8x32_impl.h src/scalar_impl.h src/scalar_low.h src/scalar_low_impl.h src/scratch.h src/scratch_impl.h src/secp256k1.c src/selftest.h src/util.h
+        mv $f ../libsecp256k1/$f
+    end
+    cd ..
+    rm -r bitcoin-core-secp256k1-*
diff --git a/vendor/github.com/nbd-wtf/go-nostr/kinds.go b/vendor/github.com/nbd-wtf/go-nostr/kinds.go
index e31e112..28343d2 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/kinds.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/kinds.go
@@ -25,6 +25,7 @@ const (
 	KindChannelMuteUser          int = 44
 	KindChess                    int = 64
 	KindMergeRequests            int = 818
+	KindComment                  int = 1111
 	KindBid                      int = 1021
 	KindBidConfirmation          int = 1022
 	KindOpenTimestamps           int = 1040
diff --git a/vendor/github.com/nbd-wtf/go-nostr/nip11/types.go b/vendor/github.com/nbd-wtf/go-nostr/nip11/types.go
index d687668..e6ae1de 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/nip11/types.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/nip11/types.go
@@ -15,14 +15,15 @@ type RelayInformationDocument struct {
 	Software      string `json:"software"`
 	Version       string `json:"version"`
 
-	Limitation     *RelayLimitationDocument `json:"limitation,omitempty"`
-	RelayCountries []string                 `json:"relay_countries,omitempty"`
-	LanguageTags   []string                 `json:"language_tags,omitempty"`
-	Tags           []string                 `json:"tags,omitempty"`
-	PostingPolicy  string                   `json:"posting_policy,omitempty"`
-	PaymentsURL    string                   `json:"payments_url,omitempty"`
-	Fees           *RelayFeesDocument       `json:"fees,omitempty"`
-	Icon           string                   `json:"icon"`
+	Limitation     *RelayLimitationDocument  `json:"limitation,omitempty"`
+	RelayCountries []string                  `json:"relay_countries,omitempty"`
+	LanguageTags   []string                  `json:"language_tags,omitempty"`
+	Tags           []string                  `json:"tags,omitempty"`
+	PostingPolicy  string                    `json:"posting_policy,omitempty"`
+	PaymentsURL    string                    `json:"payments_url,omitempty"`
+	Fees           *RelayFeesDocument        `json:"fees,omitempty"`
+	Retention      []*RelayRetentionDocument `json:"retention,omitempty"`
+	Icon           string                    `json:"icon"`
 }
 
 func (info *RelayInformationDocument) AddSupportedNIP(number int) {
@@ -34,6 +35,12 @@ func (info *RelayInformationDocument) AddSupportedNIP(number int) {
 	info.SupportedNIPs = append(info.SupportedNIPs, number)
 }
 
+func (info *RelayInformationDocument) AddSupportedNIPs(numbers []int) {
+	for _, n := range numbers {
+		info.AddSupportedNIP(n)
+	}
+}
+
 type RelayLimitationDocument struct {
 	MaxMessageLength int  `json:"max_message_length,omitempty"`
 	MaxSubscriptions int  `json:"max_subscriptions,omitempty"`
@@ -64,3 +71,9 @@ type RelayFeesDocument struct {
 		Unit   string `json:"unit"`
 	} `json:"publication,omitempty"`
 }
+
+type RelayRetentionDocument struct {
+	Time  int64   `json:"time,omitempty"`
+	Count int     `json:"count,omitempty"`
+	Kinds [][]int `json:"kinds,omitempty"`
+}
diff --git a/vendor/github.com/nbd-wtf/go-nostr/nip40/nip40.go b/vendor/github.com/nbd-wtf/go-nostr/nip40/nip40.go
new file mode 100644
index 0000000..f1cadfd
--- /dev/null
+++ b/vendor/github.com/nbd-wtf/go-nostr/nip40/nip40.go
@@ -0,0 +1,22 @@
+package nip40
+
+import (
+	"strconv"
+
+	"github.com/nbd-wtf/go-nostr"
+)
+
+// GetExpiration returns the expiration timestamp for this event, or -1 if no "expiration" tag exists or
+// if it is invalid.
+func GetExpiration(tags nostr.Tags) nostr.Timestamp {
+	for _, tag := range tags {
+		if len(tag) >= 2 && tag[0] == "expiration" {
+			if ts, err := strconv.ParseInt(tag[1], 10, 64); err == nil {
+				return nostr.Timestamp(ts)
+			} else {
+				return -1
+			}
+		}
+	}
+	return -1
+}
diff --git a/vendor/github.com/nbd-wtf/go-nostr/normalize.go b/vendor/github.com/nbd-wtf/go-nostr/normalize.go
index 6d5ce07..6211b3c 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/normalize.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/normalize.go
@@ -13,8 +13,6 @@ func NormalizeURL(u string) string {
 	}
 
 	u = strings.TrimSpace(u)
-	u = strings.ToLower(u)
-
 	if fqn := strings.Split(u, ":")[0]; fqn == "localhost" || fqn == "127.0.0.1" {
 		u = "ws://" + u
 	} else if !strings.HasPrefix(u, "http") && !strings.HasPrefix(u, "ws") {
@@ -32,6 +30,7 @@ func NormalizeURL(u string) string {
 		p.Scheme = "wss"
 	}
 
+	p.Host = strings.ToLower(p.Host)
 	p.Path = strings.TrimRight(p.Path, "/")
 
 	return p.String()
diff --git a/vendor/github.com/nbd-wtf/go-nostr/pointers.go b/vendor/github.com/nbd-wtf/go-nostr/pointers.go
index 9f4da04..732b3a7 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/pointers.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/pointers.go
@@ -1,10 +1,28 @@
 package nostr
 
+import (
+	"fmt"
+)
+
+type Pointer interface {
+	AsTagReference() string
+	AsTag() Tag
+}
+
 type ProfilePointer struct {
 	PublicKey string   `json:"pubkey"`
 	Relays    []string `json:"relays,omitempty"`
 }
 
+func (ep ProfilePointer) AsTagReference() string { return ep.PublicKey }
+
+func (ep ProfilePointer) AsTag() Tag {
+	if len(ep.Relays) > 0 {
+		return Tag{"p", ep.PublicKey, ep.Relays[0]}
+	}
+	return Tag{"p", ep.PublicKey}
+}
+
 type EventPointer struct {
 	ID     string   `json:"id"`
 	Relays []string `json:"relays,omitempty"`
@@ -12,9 +30,33 @@ type EventPointer struct {
 	Kind   int      `json:"kind,omitempty"`
 }
 
+func (ep EventPointer) AsTagReference() string { return ep.ID }
+
+func (ep EventPointer) AsTag() Tag {
+	if len(ep.Relays) > 0 {
+		if ep.Author != "" {
+			return Tag{"e", ep.ID, ep.Relays[0], ep.Author}
+		} else {
+			return Tag{"e", ep.ID, ep.Relays[0]}
+		}
+	}
+	return Tag{"e", ep.ID}
+}
+
 type EntityPointer struct {
 	PublicKey  string   `json:"pubkey"`
 	Kind       int      `json:"kind,omitempty"`
 	Identifier string   `json:"identifier,omitempty"`
 	Relays     []string `json:"relays,omitempty"`
 }
+
+func (ep EntityPointer) AsTagReference() string {
+	return fmt.Sprintf("%d:%s:%s", ep.Kind, ep.PublicKey, ep.Identifier)
+}
+
+func (ep EntityPointer) AsTag() Tag {
+	if len(ep.Relays) > 0 {
+		return Tag{"a", ep.AsTagReference(), ep.Relays[0]}
+	}
+	return Tag{"a", ep.AsTagReference()}
+}
diff --git a/vendor/github.com/nbd-wtf/go-nostr/pool.go b/vendor/github.com/nbd-wtf/go-nostr/pool.go
index 743f3bd..cf5ace6 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/pool.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/pool.go
@@ -2,9 +2,11 @@ package nostr
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"log"
 	"math"
+	"net/http"
 	"slices"
 	"strings"
 	"sync"
@@ -23,19 +25,20 @@ type SimplePool struct {
 	Context context.Context
 
 	authHandler func(context.Context, RelayEvent) error
-	cancel      context.CancelFunc
+	cancel      context.CancelCauseFunc
 
-	eventMiddleware func(RelayEvent)
-	queryMiddleware func(relay string, pubkey string, kind int)
+	eventMiddleware     func(RelayEvent)
+	duplicateMiddleware func(relay string, id string)
+	queryMiddleware     func(relay string, pubkey string, kind int)
 
 	// custom things not often used
 	penaltyBoxMu sync.Mutex
 	penaltyBox   map[string][2]float64
-	userAgent    string
+	relayOptions []RelayOption
 }
 
-type DirectedFilters struct {
-	Filters
+type DirectedFilter struct {
+	Filter
 	Relay string
 }
 
@@ -53,7 +56,7 @@ type PoolOption interface {
 }
 
 func NewSimplePool(ctx context.Context, opts ...PoolOption) *SimplePool {
-	ctx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithCancelCause(ctx)
 
 	pool := &SimplePool{
 		Relays: xsync.NewMapOf[string, *Relay](),
@@ -69,6 +72,17 @@ func NewSimplePool(ctx context.Context, opts ...PoolOption) *SimplePool {
 	return pool
 }
 
+// WithRelayOptions sets options that will be used on every relay instance created by this pool.
+func WithRelayOptions(ropts ...RelayOption) withRelayOptionsOpt {
+	return ropts
+}
+
+type withRelayOptionsOpt []RelayOption
+
+func (h withRelayOptionsOpt) ApplyPoolOption(pool *SimplePool) {
+	pool.relayOptions = h
+}
+
 // WithAuthHandler must be a function that signs the auth event when called.
 // it will be called whenever any relay in the pool returns a `CLOSED` message
 // with the "auth-required:" prefix, only once for each relay
@@ -121,28 +135,26 @@ func (h WithEventMiddleware) ApplyPoolOption(pool *SimplePool) {
 	pool.eventMiddleware = h
 }
 
-// WithQueryMiddleware is a function that will be called with every combination of relay+pubkey+kind queried
-// in a .SubMany*() call -- when applicable (i.e. when the query contains a pubkey and a kind).
-type WithQueryMiddleware func(relay string, pubkey string, kind int)
+// WithDuplicateMiddleware is a function that will be called with all duplicate ids received.
+type WithDuplicateMiddleware func(relay string, id string)
 
-func (h WithQueryMiddleware) ApplyPoolOption(pool *SimplePool) {
-	pool.queryMiddleware = h
+func (h WithDuplicateMiddleware) ApplyPoolOption(pool *SimplePool) {
+	pool.duplicateMiddleware = h
 }
 
-// WithUserAgent sets the user-agent header for all relay connections in the pool.
-func WithUserAgent(userAgent string) withUserAgentOpt { return withUserAgentOpt(userAgent) }
+// WithAuthorKindQueryMiddleware is a function that will be called with every combination of relay+pubkey+kind queried
+// in a .SubMany*() call -- when applicable (i.e. when the query contains a pubkey and a kind).
+type WithAuthorKindQueryMiddleware func(relay string, pubkey string, kind int)
 
-type withUserAgentOpt string
-
-func (h withUserAgentOpt) ApplyPoolOption(pool *SimplePool) {
-	pool.userAgent = string(h)
+func (h WithAuthorKindQueryMiddleware) ApplyPoolOption(pool *SimplePool) {
+	pool.queryMiddleware = h
 }
 
 var (
 	_ PoolOption = (WithAuthHandler)(nil)
 	_ PoolOption = (WithEventMiddleware)(nil)
 	_ PoolOption = WithPenaltyBox()
-	_ PoolOption = WithUserAgent("")
+	_ PoolOption = WithRelayOptions(WithRequestHeader(http.Header{}))
 )
 
 func (pool *SimplePool) EnsureRelay(url string) (*Relay, error) {
@@ -166,12 +178,14 @@ func (pool *SimplePool) EnsureRelay(url string) (*Relay, error) {
 
 	// try to connect
 	// we use this ctx here so when the pool dies everything dies
-	ctx, cancel := context.WithTimeout(pool.Context, time.Second*15)
+	ctx, cancel := context.WithTimeoutCause(
+		pool.Context,
+		time.Second*15,
+		errors.New("connecting to the relay took too long"),
+	)
 	defer cancel()
 
-	relay = NewRelay(context.Background(), url)
-	relay.RequestHeader.Set("User-Agent", pool.userAgent)
-
+	relay = NewRelay(context.Background(), url, pool.relayOptions...)
 	if err := relay.Connect(ctx); err != nil {
 		if pool.penaltyBox != nil {
 			// putting relay in penalty box
@@ -220,26 +234,6 @@ func (pool *SimplePool) SubMany(
 	urls []string,
 	filters Filters,
 	opts ...SubscriptionOption,
-) chan RelayEvent {
-	return pool.subMany(ctx, urls, filters, true, opts)
-}
-
-// SubManyNonUnique is like SubMany, but returns duplicate events if they come from different relays
-func (pool *SimplePool) SubManyNonUnique(
-	ctx context.Context,
-	urls []string,
-	filters Filters,
-	opts ...SubscriptionOption,
-) chan RelayEvent {
-	return pool.subMany(ctx, urls, filters, false, opts)
-}
-
-func (pool *SimplePool) subMany(
-	ctx context.Context,
-	urls []string,
-	filters Filters,
-	unique bool,
-	opts []SubscriptionOption,
 ) chan RelayEvent {
 	ctx, cancel := context.WithCancel(ctx)
 	_ = cancel // do this so `go vet` will stop complaining
@@ -298,7 +292,13 @@ func (pool *SimplePool) subMany(
 				hasAuthed = false
 
 			subscribe:
-				sub, err = relay.Subscribe(ctx, filters, opts...)
+				sub, err = relay.Subscribe(ctx, filters, append(opts, WithCheckDuplicate(func(id, relay string) bool {
+					_, exists := seenAlready.Load(id)
+					if exists && pool.duplicateMiddleware != nil {
+						pool.duplicateMiddleware(relay, id)
+					}
+					return exists
+				}))...)
 				if err != nil {
 					goto reconnect
 				}
@@ -330,11 +330,7 @@ func (pool *SimplePool) subMany(
 							mh(ie)
 						}
 
-						if unique {
-							if _, seen := seenAlready.LoadOrStore(evt.ID, evt.CreatedAt); seen {
-								continue
-							}
-						}
+						seenAlready.Store(evt.ID, evt.CreatedAt)
 
 						select {
 						case events <- ie:
@@ -344,12 +340,11 @@ func (pool *SimplePool) subMany(
 					case <-ticker.C:
 						if eose {
 							old := Timestamp(time.Now().Add(-seenAlreadyDropTick).Unix())
-							seenAlready.Range(func(id string, value Timestamp) bool {
+							for id, value := range seenAlready.Range {
 								if value < old {
 									seenAlready.Delete(id)
 								}
-								return true
-							})
+							}
 						}
 					case reason := <-sub.ClosedReason:
 						if strings.HasPrefix(reason, "auth-required:") && pool.authHandler != nil && !hasAuthed {
@@ -389,37 +384,36 @@ func (pool *SimplePool) SubManyEose(
 	filters Filters,
 	opts ...SubscriptionOption,
 ) chan RelayEvent {
-	return pool.subManyEose(ctx, urls, filters, true, opts)
+	seenAlready := xsync.NewMapOf[string, bool]()
+	return pool.subManyEoseNonOverwriteCheckDuplicate(ctx, urls, filters, WithCheckDuplicate(func(id, relay string) bool {
+		_, exists := seenAlready.Load(id)
+		if exists && pool.duplicateMiddleware != nil {
+			pool.duplicateMiddleware(relay, id)
+		}
+		return exists
+	}), seenAlready, opts...)
 }
 
-// SubManyEoseNonUnique is like SubManyEose, but returns duplicate events if they come from different relays
-func (pool *SimplePool) SubManyEoseNonUnique(
+func (pool *SimplePool) subManyEoseNonOverwriteCheckDuplicate(
 	ctx context.Context,
 	urls []string,
 	filters Filters,
+	wcd WithCheckDuplicate,
+	seenAlready *xsync.MapOf[string, bool],
 	opts ...SubscriptionOption,
 ) chan RelayEvent {
-	return pool.subManyEose(ctx, urls, filters, false, opts)
-}
-
-func (pool *SimplePool) subManyEose(
-	ctx context.Context,
-	urls []string,
-	filters Filters,
-	unique bool,
-	opts []SubscriptionOption,
-) chan RelayEvent {
-	ctx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithCancelCause(ctx)
 
 	events := make(chan RelayEvent)
-	seenAlready := xsync.NewMapOf[string, bool]()
 	wg := sync.WaitGroup{}
 	wg.Add(len(urls))
 
+	opts = append(opts, wcd)
+
 	go func() {
 		// this will happen when all subscriptions get an eose (or when they die)
 		wg.Wait()
-		cancel()
+		cancel(errors.New("all subscriptions ended"))
 		close(events)
 	}()
 
@@ -441,6 +435,7 @@ func (pool *SimplePool) subManyEose(
 
 			relay, err := pool.EnsureRelay(nm)
 			if err != nil {
+				debugLogf("error connecting to %s with %v: %s", nm, filters, err)
 				return
 			}
 
@@ -448,7 +443,7 @@ func (pool *SimplePool) subManyEose(
 
 		subscribe:
 			sub, err := relay.Subscribe(ctx, filters, opts...)
-			if sub == nil {
+			if err != nil {
 				debugLogf("error subscribing to %s with %v: %s", relay, filters, err)
 				return
 			}
@@ -482,11 +477,7 @@ func (pool *SimplePool) subManyEose(
 						mh(ie)
 					}
 
-					if unique {
-						if _, seen := seenAlready.LoadOrStore(evt.ID, true); seen {
-							continue
-						}
-					}
+					seenAlready.Store(evt.ID, true)
 
 					select {
 					case events <- ie:
@@ -508,7 +499,7 @@ func (pool *SimplePool) CountMany(
 	filter Filter,
 	opts []SubscriptionOption,
 ) int {
-	hll := hyperloglog.New(0) // offset is irrelevant here, so we just pass 0
+	hll := hyperloglog.New(0) // offset is irrelevant here
 
 	wg := sync.WaitGroup{}
 	wg.Add(len(urls))
@@ -536,47 +527,52 @@ func (pool *SimplePool) CountMany(
 
 // QuerySingle returns the first event returned by the first relay, cancels everything else.
 func (pool *SimplePool) QuerySingle(ctx context.Context, urls []string, filter Filter) *RelayEvent {
-	ctx, cancel := context.WithCancel(ctx)
-	defer cancel()
+	ctx, cancel := context.WithCancelCause(ctx)
 	for ievt := range pool.SubManyEose(ctx, urls, Filters{filter}) {
+		cancel(errors.New("got the first event and ended successfully"))
 		return &ievt
 	}
+	cancel(errors.New("SubManyEose() didn't get yield events"))
 	return nil
 }
 
-func (pool *SimplePool) batchedSubMany(
+func (pool *SimplePool) BatchedSubManyEose(
 	ctx context.Context,
-	dfs []DirectedFilters,
-	subFn func(context.Context, []string, Filters, bool, []SubscriptionOption) chan RelayEvent,
-	opts []SubscriptionOption,
+	dfs []DirectedFilter,
+	opts ...SubscriptionOption,
 ) chan RelayEvent {
 	res := make(chan RelayEvent)
+	wg := sync.WaitGroup{}
+	wg.Add(len(dfs))
+	seenAlready := xsync.NewMapOf[string, bool]()
 
 	for _, df := range dfs {
-		go func(df DirectedFilters) {
-			for ie := range subFn(ctx, []string{df.Relay}, df.Filters, true, opts) {
+		go func(df DirectedFilter) {
+			for ie := range pool.subManyEoseNonOverwriteCheckDuplicate(ctx,
+				[]string{df.Relay},
+				Filters{df.Filter},
+				WithCheckDuplicate(func(id, relay string) bool {
+					_, exists := seenAlready.Load(id)
+					if exists && pool.duplicateMiddleware != nil {
+						pool.duplicateMiddleware(relay, id)
+					}
+					return exists
+				}), seenAlready, opts...) {
 				res <- ie
 			}
+
+			wg.Done()
 		}(df)
 	}
 
+	go func() {
+		wg.Wait()
+		close(res)
+	}()
+
 	return res
 }
 
-// BatchedSubMany fires subscriptions only to specific relays, but batches them when they are the same.
-func (pool *SimplePool) BatchedSubMany(
-	ctx context.Context,
-	dfs []DirectedFilters,
-	opts ...SubscriptionOption,
-) chan RelayEvent {
-	return pool.batchedSubMany(ctx, dfs, pool.subMany, opts)
-}
-
-// BatchedSubManyEose is like BatchedSubMany, but ends upon receiving EOSE from all relays.
-func (pool *SimplePool) BatchedSubManyEose(
-	ctx context.Context,
-	dfs []DirectedFilters,
-	opts ...SubscriptionOption,
-) chan RelayEvent {
-	return pool.batchedSubMany(ctx, dfs, pool.subManyEose, opts)
+func (pool *SimplePool) Close(reason string) {
+	pool.cancel(fmt.Errorf("pool closed with reason: '%s'", reason))
 }
diff --git a/vendor/github.com/nbd-wtf/go-nostr/relay.go b/vendor/github.com/nbd-wtf/go-nostr/relay.go
index 2ef239f..18a27f5 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/relay.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/relay.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"context"
 	"crypto/tls"
+	"errors"
 	"fmt"
 	"log"
 	"net/http"
@@ -12,8 +13,6 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/gobwas/ws"
-	"github.com/gobwas/ws/wsutil"
 	"github.com/puzpuzpuz/xsync/v3"
 )
 
@@ -25,14 +24,14 @@ type Relay struct {
 	closeMutex sync.Mutex
 
 	URL           string
-	RequestHeader http.Header // e.g. for origin header
+	requestHeader http.Header // e.g. for origin header
 
 	Connection    *Connection
 	Subscriptions *xsync.MapOf[int64, *Subscription]
 
 	ConnectionError         error
 	connectionContext       context.Context // will be canceled when the connection closes
-	connectionContextCancel context.CancelFunc
+	connectionContextCancel context.CancelCauseFunc
 
 	challenge                     string       // NIP-42 challenge, we only keep the last
 	noticeHandler                 func(string) // NIP-01 NOTICEs
@@ -53,7 +52,7 @@ type writeRequest struct {
 
 // NewRelay returns a new relay. The relay connection will be closed when the context is canceled.
 func NewRelay(ctx context.Context, url string, opts ...RelayOption) *Relay {
-	ctx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithCancelCause(ctx)
 	r := &Relay{
 		URL:                           NormalizeURL(url),
 		connectionContext:             ctx,
@@ -62,7 +61,7 @@ func NewRelay(ctx context.Context, url string, opts ...RelayOption) *Relay {
 		okCallbacks:                   xsync.NewMapOf[string, func(bool, string)](),
 		writeQueue:                    make(chan writeRequest),
 		subscriptionChannelCloseQueue: make(chan *Subscription),
-		RequestHeader:                 make(http.Header, 1),
+		requestHeader:                 nil,
 	}
 
 	for _, opt := range opts {
@@ -90,6 +89,7 @@ type RelayOption interface {
 var (
 	_ RelayOption = (WithNoticeHandler)(nil)
 	_ RelayOption = (WithCustomHandler)(nil)
+	_ RelayOption = (WithRequestHeader)(nil)
 )
 
 // WithNoticeHandler just takes notices and is expected to do something with them.
@@ -108,6 +108,13 @@ func (ch WithCustomHandler) ApplyRelayOption(r *Relay) {
 	r.customHandler = ch
 }
 
+// WithRequestHeader sets the HTTP request header of the websocket preflight request.
+type WithRequestHeader http.Header
+
+func (ch WithRequestHeader) ApplyRelayOption(r *Relay) {
+	r.requestHeader = http.Header(ch)
+}
+
 // String just returns the relay URL.
 func (r *Relay) String() string {
 	return r.URL
@@ -144,15 +151,11 @@ func (r *Relay) ConnectWithTLS(ctx context.Context, tlsConfig *tls.Config) error
 	if _, ok := ctx.Deadline(); !ok {
 		// if no timeout is set, force it to 7 seconds
 		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, 7*time.Second)
+		ctx, cancel = context.WithTimeoutCause(ctx, 7*time.Second, errors.New("connection took too long"))
 		defer cancel()
 	}
 
-	if r.RequestHeader.Get("User-Agent") == "" {
-		r.RequestHeader.Set("User-Agent", "github.com/nbd-wtf/go-nostr")
-	}
-
-	conn, err := NewConnection(ctx, r.URL, r.RequestHeader, tlsConfig)
+	conn, err := NewConnection(ctx, r.URL, r.requestHeader, tlsConfig)
 	if err != nil {
 		return fmt.Errorf("error opening websocket to '%s': %w", r.URL, err)
 	}
@@ -173,7 +176,7 @@ func (r *Relay) ConnectWithTLS(ctx context.Context, tlsConfig *tls.Config) error
 
 		// close all subscriptions
 		for _, sub := range r.Subscriptions.Range {
-			sub.Unsub()
+			sub.unsub(fmt.Errorf("relay connection closed: %w / %w", context.Cause(r.connectionContext), r.ConnectionError))
 		}
 	}()
 
@@ -183,7 +186,7 @@ func (r *Relay) ConnectWithTLS(ctx context.Context, tlsConfig *tls.Config) error
 			select {
 			case <-ticker.C:
 				if r.Connection != nil {
-					err := wsutil.WriteClientMessage(r.Connection.conn, ws.OpPing, nil)
+					err := r.Connection.Ping(r.connectionContext)
 					if err != nil {
 						InfoLogger.Printf("{%s} error writing ping: %v; closing websocket", r.URL, err)
 						r.Close() // this should trigger a context cancelation
@@ -210,14 +213,26 @@ func (r *Relay) ConnectWithTLS(ctx context.Context, tlsConfig *tls.Config) error
 
 		for {
 			buf.Reset()
+
 			if err := conn.ReadMessage(r.connectionContext, buf); err != nil {
 				r.ConnectionError = err
-				r.Close()
+				r.close(err)
 				break
 			}
 
 			message := buf.Bytes()
 			debugLogf("{%s} received %v\n", r.URL, message)
+
+			// if this is an "EVENT" message, pre-parse the subscription id and event id so duplicate
+			// events can be skipped before full parsing, which speeds things up a little
+			subid := extractSubID(message)
+			subscription, ok := r.Subscriptions.Load(subIdToSerial(subid))
+			if ok && subscription.checkDuplicate != nil {
+				if subscription.checkDuplicate(extractEventID(message[10+len(subid):]), r.URL) {
+					continue
+				}
+			}
+
 			envelope := ParseMessage(message)
 			if envelope == nil {
 				if r.customHandler != nil {
@@ -240,11 +255,8 @@ func (r *Relay) ConnectWithTLS(ctx context.Context, tlsConfig *tls.Config) error
 				}
 				r.challenge = *env.Challenge
 			case *EventEnvelope:
-				if env.SubscriptionID == nil {
-					continue
-				}
-
-				if subscription, ok := r.Subscriptions.Load(subIdToSerial(*env.SubscriptionID)); !ok {
+				// we already have the subscription from the pre-check above, so we can just reuse it
+				if subscription == nil {
 					// InfoLogger.Printf("{%s} no subscription with id '%s'\n", r.URL, *env.SubscriptionID)
 					continue
 				} else {
@@ -397,7 +409,7 @@ func (r *Relay) Subscribe(ctx context.Context, filters Filters, opts ...Subscrip
 // Failure to do that will result in a huge number of halted goroutines being created.
 func (r *Relay) PrepareSubscription(ctx context.Context, filters Filters, opts ...SubscriptionOption) *Subscription {
 	current := subscriptionIDCounter.Add(1)
-	ctx, cancel := context.WithCancel(ctx)
+	ctx, cancel := context.WithCancelCause(ctx)
 
 	sub := &Subscription{
 		Relay:             r,
@@ -416,10 +428,12 @@ func (r *Relay) PrepareSubscription(ctx context.Context, filters Filters, opts .
 		switch o := opt.(type) {
 		case WithLabel:
 			label = string(o)
+		case WithCheckDuplicate:
+			sub.checkDuplicate = o
 		}
 	}
 
-	// subscription id calculation
+	// subscription id computation
 	buf := subIdPool.Get().([]byte)[:0]
 	buf = strconv.AppendInt(buf, sub.counter, 10)
 	buf = append(buf, ':')
@@ -450,7 +464,7 @@ func (r *Relay) QueryEvents(ctx context.Context, filter Filter) (chan *Event, er
 			case <-ctx.Done():
 			case <-r.Context().Done():
 			}
-			sub.Unsub()
+			sub.unsub(errors.New("QueryEvents() ended"))
 			return
 		}
 	}()
@@ -462,7 +476,7 @@ func (r *Relay) QuerySync(ctx context.Context, filter Filter) ([]*Event, error)
 	if _, ok := ctx.Deadline(); !ok {
 		// if no timeout is set, force it to 7 seconds
 		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, 7*time.Second)
+		ctx, cancel = context.WithTimeoutCause(ctx, 7*time.Second, errors.New("QuerySync() took too long"))
 		defer cancel()
 	}
 
@@ -500,12 +514,12 @@ func (r *Relay) countInternal(ctx context.Context, filters Filters, opts ...Subs
 		return CountEnvelope{}, err
 	}
 
-	defer sub.Unsub()
+	defer sub.unsub(errors.New("countInternal() ended"))
 
 	if _, ok := ctx.Deadline(); !ok {
 		// if no timeout is set, force it to 7 seconds
 		var cancel context.CancelFunc
-		ctx, cancel = context.WithTimeout(ctx, 7*time.Second)
+		ctx, cancel = context.WithTimeoutCause(ctx, 7*time.Second, errors.New("countInternal took too long"))
 		defer cancel()
 	}
 
@@ -520,13 +534,17 @@ func (r *Relay) countInternal(ctx context.Context, filters Filters, opts ...Subs
 }
 
 func (r *Relay) Close() error {
+	return r.close(errors.New("Close() called"))
+}
+
+func (r *Relay) close(reason error) error {
 	r.closeMutex.Lock()
 	defer r.closeMutex.Unlock()
 
 	if r.connectionContextCancel == nil {
 		return fmt.Errorf("relay already closed")
 	}
-	r.connectionContextCancel()
+	r.connectionContextCancel(reason)
 	r.connectionContextCancel = nil
 
 	if r.Connection == nil {
diff --git a/vendor/github.com/nbd-wtf/go-nostr/signature_libsecp256k1.go b/vendor/github.com/nbd-wtf/go-nostr/signature_libsecp256k1.go
index bf5ed7c..b4c59db 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/signature_libsecp256k1.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/signature_libsecp256k1.go
@@ -3,10 +3,23 @@
 package nostr
 
 /*
-#cgo LDFLAGS: -lsecp256k1
-#include <secp256k1.h>
-#include <secp256k1_schnorrsig.h>
-#include <secp256k1_extrakeys.h>
+#cgo CFLAGS: -I${SRCDIR}/libsecp256k1/include -I${SRCDIR}/libsecp256k1/src
+#cgo CFLAGS: -DECMULT_GEN_PREC_BITS=4
+#cgo CFLAGS: -DECMULT_WINDOW_SIZE=15
+#cgo CFLAGS: -DENABLE_MODULE_SCHNORRSIG=1
+#cgo CFLAGS: -DENABLE_MODULE_EXTRAKEYS=1
+
+#include "./libsecp256k1/src/secp256k1.c"
+#include "./libsecp256k1/src/precomputed_ecmult.c"
+#include "./libsecp256k1/src/precomputed_ecmult_gen.c"
+#include "./libsecp256k1/src/ecmult_gen.h"
+#include "./libsecp256k1/src/ecmult.h"
+#include "./libsecp256k1/src/modules/extrakeys/main_impl.h"
+#include "./libsecp256k1/src/modules/schnorrsig/main_impl.h"
+
+#include "./libsecp256k1/include/secp256k1.h"
+#include "./libsecp256k1/include/secp256k1_extrakeys.h"
+#include "./libsecp256k1/include/secp256k1_schnorrsig.h"
 */
 import "C"
 
diff --git a/vendor/github.com/nbd-wtf/go-nostr/subscription.go b/vendor/github.com/nbd-wtf/go-nostr/subscription.go
index 1adf62d..5d53a33 100644
--- a/vendor/github.com/nbd-wtf/go-nostr/subscription.go
+++ b/vendor/github.com/nbd-wtf/go-nostr/subscription.go
@@ -2,6 +2,7 @@ package nostr
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sync"
 	"sync/atomic"
@@ -31,10 +32,14 @@ type Subscription struct {
 	// Context will be .Done() when the subscription ends
 	Context context.Context
 
+	// if it is not nil, checkDuplicate will be called for every event received
+	// if it returns true that event will not be processed further.
+	checkDuplicate func(id string, relay string) bool
+
 	match  func(*Event) bool // this will be either Filters.Match or Filters.MatchIgnoringTimestampConstraints
 	live   atomic.Bool
 	eosed  atomic.Bool
-	cancel context.CancelFunc
+	cancel context.CancelCauseFunc
 
 	// this keeps track of the events we've received before the EOSE that we must dispatch before
 	// closing the EndOfStoredEvents channel
@@ -58,12 +63,21 @@ type WithLabel string
 
 func (_ WithLabel) IsSubscriptionOption() {}
 
-var _ SubscriptionOption = (WithLabel)("")
+// WithCheckDuplicate sets checkDuplicate on the subscription.
+type WithCheckDuplicate func(id, relay string) bool
+
+func (_ WithCheckDuplicate) IsSubscriptionOption() {}
+
+var (
+	_ SubscriptionOption = (WithLabel)("")
+	_ SubscriptionOption = (WithCheckDuplicate)(nil)
+)
 
 func (sub *Subscription) start() {
 	<-sub.Context.Done()
+
 	// the subscription ends once the context is canceled (if not already)
-	sub.Unsub() // this will set sub.live to false
+	sub.unsub(errors.New("context done on start()")) // this will set sub.live to false
 
 	// do this so we don't have the possibility of closing the Events channel and then trying to send to it
 	sub.mu.Lock()
@@ -111,15 +125,19 @@ func (sub *Subscription) handleClosed(reason string) {
 	go func() {
 		sub.ClosedReason <- reason
 		sub.live.Store(false) // set this so we don't send an unnecessary CLOSE to the relay
-		sub.Unsub()
+		sub.unsub(fmt.Errorf("CLOSED received: %s", reason))
 	}()
 }
 
 // Unsub closes the subscription, sending "CLOSE" to relay as in NIP-01.
 // Unsub() also closes the channel sub.Events and makes a new one.
 func (sub *Subscription) Unsub() {
+	sub.unsub(errors.New("Unsub() called"))
+}
+
+func (sub *Subscription) unsub(err error) {
 	// cancel the context (if it's not canceled already)
-	sub.cancel()
+	sub.cancel(err)
 
 	// mark subscription as closed and send a CLOSE to the relay (naïve sync.Once implementation)
 	if sub.live.CompareAndSwap(true, false) {
@@ -157,8 +175,9 @@ func (sub *Subscription) Fire() error {
 
 	sub.live.Store(true)
 	if err := <-sub.Relay.Write(reqb); err != nil {
-		sub.cancel()
-		return fmt.Errorf("failed to write: %w", err)
+		err := fmt.Errorf("failed to write: %w", err)
+		sub.cancel(err)
+		return err
 	}
 
 	return nil
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/README.md b/vendor/github.com/puzpuzpuz/xsync/v3/README.md
index 6fe0497..dac831b 100644
--- a/vendor/github.com/puzpuzpuz/xsync/v3/README.md
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/README.md
@@ -80,7 +80,14 @@ m.Store(Point{42, 42}, 42)
 v, ok := m.Load(point{42, 42})
 ```
 
-Both maps use the built-in Golang's hash function which has DDOS protection. This means that each map instance gets its own seed number and the hash function uses that seed for hash code calculation. However, for smaller keys this hash function has some overhead. So, if you don't need DDOS protection, you may provide a custom hash function when creating a `MapOf`. For instance, Murmur3 finalizer does a decent job when it comes to integers:
+Apart from `Range` method available for map iteration, there are also `ToPlainMap`/`ToPlainMapOf` utility functions to convert a `Map`/`MapOf` to a built-in Go's `map`:
+```go
+m := xsync.NewMapOf[int, int]()
+m.Store(42, 42)
+pm := xsync.ToPlainMapOf(m)
+```
+
+Both `Map` and `MapOf` use the built-in Golang's hash function which has DDOS protection. This means that each map instance gets its own seed number and the hash function uses that seed for hash code calculation. However, for smaller keys this hash function has some overhead. So, if you don't need DDOS protection, you may provide a custom hash function when creating a `MapOf`. For instance, Murmur3 finalizer does a decent job when it comes to integers:
 
 ```go
 m := NewMapOfWithHasher[int, int](func(i int, _ uint64) uint64 {
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/map.go b/vendor/github.com/puzpuzpuz/xsync/v3/map.go
index 6c5b6eb..092aa0b 100644
--- a/vendor/github.com/puzpuzpuz/xsync/v3/map.go
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/map.go
@@ -200,6 +200,21 @@ func newMapTable(minTableLen int) *mapTable {
 	return t
 }
 
+// ToPlainMap returns a native map with a copy of xsync Map's
+// contents. The copied xsync Map should not be modified while
+// this call is made. If the copied Map is modified, the copying
+// behavior is the same as in the Range method.
+func ToPlainMap(m *Map) map[string]interface{} {
+	pm := make(map[string]interface{})
+	if m != nil {
+		m.Range(func(key string, value interface{}) bool {
+			pm[key] = value
+			return true
+		})
+	}
+	return pm
+}
+
 // Load returns the value stored in the map for a key, or nil if no
 // value is present.
 // The ok result indicates whether value was found in the map.
diff --git a/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go b/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go
index 4c4ad08..9d8105e 100644
--- a/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go
+++ b/vendor/github.com/puzpuzpuz/xsync/v3/mapof.go
@@ -149,6 +149,21 @@ func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] {
 	return t
 }
 
+// ToPlainMapOf returns a native map with a copy of xsync Map's
+// contents. The copied xsync Map should not be modified while
+// this call is made. If the copied Map is modified, the copying
+// behavior is the same as in the Range method.
+func ToPlainMapOf[K comparable, V any](m *MapOf[K, V]) map[K]V {
+	pm := make(map[K]V)
+	if m != nil {
+		m.Range(func(key K, value V) bool {
+			pm[key] = value
+			return true
+		})
+	}
+	return pm
+}
+
 // Load returns the value stored in the map for a key, or zero value
 // of type V if no value is present.
 // The ok result indicates whether value was found in the map.
diff --git a/vendor/modules.txt b/vendor/modules.txt
index d3df308..ff1cfb6 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -18,6 +18,17 @@ github.com/btcsuite/btcd/btcec/v2/schnorr
 # github.com/btcsuite/btcd/chaincfg/chainhash v1.1.0
 ## explicit; go 1.17
 github.com/btcsuite/btcd/chaincfg/chainhash
+# github.com/coder/websocket v1.8.12
+## explicit; go 1.19
+github.com/coder/websocket
+github.com/coder/websocket/internal/bpool
+github.com/coder/websocket/internal/errd
+github.com/coder/websocket/internal/util
+github.com/coder/websocket/internal/wsjs
+github.com/coder/websocket/internal/xsync
+# github.com/davecgh/go-spew v1.1.1
+## explicit
+github.com/davecgh/go-spew/spew
 # github.com/decred/dcrd/crypto/blake256 v1.1.0
 ## explicit; go 1.17
 github.com/decred/dcrd/crypto/blake256
@@ -29,29 +40,15 @@ github.com/decred/dcrd/dcrec/secp256k1/v4/schnorr
 # github.com/fasthttp/websocket v1.5.12
 ## explicit; go 1.21
 github.com/fasthttp/websocket
-# github.com/fiatjaf/eventstore v0.14.4
+# github.com/fiatjaf/eventstore v0.16.0
 ## explicit; go 1.23.1
 github.com/fiatjaf/eventstore
 github.com/fiatjaf/eventstore/internal
 github.com/fiatjaf/eventstore/postgresql
-# github.com/fiatjaf/khatru v0.14.0
+# github.com/fiatjaf/khatru v0.15.0
 ## explicit; go 1.23.1
 github.com/fiatjaf/khatru
 github.com/fiatjaf/khatru/policies
-# github.com/gobwas/httphead v0.1.0
-## explicit; go 1.15
-github.com/gobwas/httphead
-# github.com/gobwas/pool v0.2.1
-## explicit
-github.com/gobwas/pool
-github.com/gobwas/pool/internal/pmath
-github.com/gobwas/pool/pbufio
-github.com/gobwas/pool/pbytes
-# github.com/gobwas/ws v1.4.0
-## explicit; go 1.16
-github.com/gobwas/ws
-github.com/gobwas/ws/wsflate
-github.com/gobwas/ws/wsutil
 # github.com/gorilla/schema v1.4.1
 ## explicit; go 1.20
 github.com/gorilla/schema
@@ -100,11 +97,12 @@ github.com/modern-go/concurrent
 # github.com/modern-go/reflect2 v1.0.2
 ## explicit; go 1.12
 github.com/modern-go/reflect2
-# github.com/nbd-wtf/go-nostr v0.45.0
+# github.com/nbd-wtf/go-nostr v0.48.0
 ## explicit; go 1.23.1
 github.com/nbd-wtf/go-nostr
 github.com/nbd-wtf/go-nostr/nip04
 github.com/nbd-wtf/go-nostr/nip11
+github.com/nbd-wtf/go-nostr/nip40
 github.com/nbd-wtf/go-nostr/nip42
 github.com/nbd-wtf/go-nostr/nip45
 github.com/nbd-wtf/go-nostr/nip45/hyperloglog
@@ -112,7 +110,7 @@ github.com/nbd-wtf/go-nostr/nip77
 github.com/nbd-wtf/go-nostr/nip77/negentropy
 github.com/nbd-wtf/go-nostr/nip77/negentropy/storage/vector
 github.com/nbd-wtf/go-nostr/nip86
-# github.com/puzpuzpuz/xsync/v3 v3.4.0
+# github.com/puzpuzpuz/xsync/v3 v3.4.1
 ## explicit; go 1.18
 github.com/puzpuzpuz/xsync/v3
 # github.com/rs/cors v1.11.1