replace zxq.co/ripple/hanayo

Commit c3d206c173 by Alicia, 2019-02-23 13:29:15 +00:00
5871 changed files with 1353715 additions and 0 deletions

vendor/gopkg.in/redis.v5/internal/pool/bench_test.go generated vendored Normal file

@@ -0,0 +1,70 @@
package pool_test
import (
"errors"
"testing"
"time"
"gopkg.in/redis.v5/internal/pool"
)
func benchmarkPoolGetPut(b *testing.B, poolSize int) {
connPool := pool.NewConnPool(dummyDialer, poolSize, time.Second, time.Hour, time.Hour)
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
cn, _, err := connPool.Get()
if err != nil {
b.Fatal(err)
}
if err = connPool.Put(cn); err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkPoolGetPut10Conns(b *testing.B) {
benchmarkPoolGetPut(b, 10)
}
func BenchmarkPoolGetPut100Conns(b *testing.B) {
benchmarkPoolGetPut(b, 100)
}
func BenchmarkPoolGetPut1000Conns(b *testing.B) {
benchmarkPoolGetPut(b, 1000)
}
func benchmarkPoolGetRemove(b *testing.B, poolSize int) {
connPool := pool.NewConnPool(dummyDialer, poolSize, time.Second, time.Hour, time.Hour)
removeReason := errors.New("benchmark")
b.ResetTimer()
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
cn, _, err := connPool.Get()
if err != nil {
b.Fatal(err)
}
if err := connPool.Remove(cn, removeReason); err != nil {
b.Fatal(err)
}
}
})
}
func BenchmarkPoolGetRemove10Conns(b *testing.B) {
benchmarkPoolGetRemove(b, 10)
}
func BenchmarkPoolGetRemove100Conns(b *testing.B) {
benchmarkPoolGetRemove(b, 100)
}
func BenchmarkPoolGetRemove1000Conns(b *testing.B) {
benchmarkPoolGetRemove(b, 1000)
}
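
These benchmarks measure contention on the pool's internal bookkeeping rather than real traffic: dummyDialer (defined in main_test.go below) hands back an empty *net.TCPConn, so each Get/Put and Get/Remove cycle touches only the semaphore and the free list, never the network. Assuming the vendored layout above, they could be run from the module root with something like go test -run NONE -bench BenchmarkPool ./internal/pool/.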

vendor/gopkg.in/redis.v5/internal/pool/conn.go generated vendored Normal file

@@ -0,0 +1,78 @@
package pool
import (
"net"
"sync/atomic"
"time"
"gopkg.in/redis.v5/internal/proto"
)
var noDeadline = time.Time{}
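// Conn wraps a net.Conn with the protocol reader and write buffer.
// usedAt is stored through an atomic.Value so the idle reaper can read
// it without taking a lock.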
type Conn struct {
netConn net.Conn
Rd *proto.Reader
Wb *proto.WriteBuffer
Inited bool
usedAt atomic.Value
}
func NewConn(netConn net.Conn) *Conn {
cn := &Conn{
netConn: netConn,
Wb: proto.NewWriteBuffer(),
}
cn.Rd = proto.NewReader(cn.netConn)
cn.SetUsedAt(time.Now())
return cn
}
func (cn *Conn) UsedAt() time.Time {
return cn.usedAt.Load().(time.Time)
}
func (cn *Conn) SetUsedAt(tm time.Time) {
cn.usedAt.Store(tm)
}
func (cn *Conn) SetNetConn(netConn net.Conn) {
cn.netConn = netConn
cn.Rd.Reset(netConn)
}
func (cn *Conn) IsStale(timeout time.Duration) bool {
return timeout > 0 && time.Since(cn.UsedAt()) > timeout
}
func (cn *Conn) SetReadTimeout(timeout time.Duration) error {
now := time.Now()
cn.SetUsedAt(now)
if timeout > 0 {
return cn.netConn.SetReadDeadline(now.Add(timeout))
}
return cn.netConn.SetReadDeadline(noDeadline)
}
func (cn *Conn) SetWriteTimeout(timeout time.Duration) error {
now := time.Now()
cn.SetUsedAt(now)
if timeout > 0 {
return cn.netConn.SetWriteDeadline(now.Add(timeout))
}
return cn.netConn.SetWriteDeadline(noDeadline)
}
func (cn *Conn) Write(b []byte) (int, error) {
return cn.netConn.Write(b)
}
func (cn *Conn) RemoteAddr() net.Addr {
return cn.netConn.RemoteAddr()
}
func (cn *Conn) Close() error {
return cn.netConn.Close()
}
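
Because the package lives under internal/, it cannot be imported from outside gopkg.in/redis.v5, so the following sketch is hypothetical; it uses net.Pipe as a stand-in for a dialed connection, purely to show how IsStale tracks SetUsedAt:

package main

import (
	"fmt"
	"net"
	"time"

	"gopkg.in/redis.v5/internal/pool"
)

func main() {
	// net.Pipe stands in for a dialed connection; no server needed.
	client, server := net.Pipe()
	defer server.Close()

	cn := pool.NewConn(client)
	fmt.Println(cn.IsStale(time.Minute)) // false: NewConn just set usedAt

	cn.SetUsedAt(time.Now().Add(-2 * time.Minute))
	fmt.Println(cn.IsStale(time.Minute)) // true: last use predates the timeout

	_ = cn.Close()
}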

vendor/gopkg.in/redis.v5/internal/pool/main_test.go generated vendored Normal file

@@ -0,0 +1,35 @@
package pool_test
import (
"net"
"sync"
"testing"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func TestGinkgoSuite(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "pool")
}
func perform(n int, cbs ...func(int)) {
var wg sync.WaitGroup
for _, cb := range cbs {
for i := 0; i < n; i++ {
wg.Add(1)
go func(cb func(int), i int) {
defer GinkgoRecover()
defer wg.Done()
cb(i)
}(cb, i)
}
}
wg.Wait()
}
func dummyDialer() (net.Conn, error) {
return &net.TCPConn{}, nil
}
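
perform fans each callback out across n goroutines and blocks until all of them finish; GinkgoRecover keeps a failed expectation in one goroutine from crashing the whole process. A hypothetical call from inside this test package:

// Launches six goroutines (three per callback) and returns when all finish.
perform(3,
	func(i int) { fmt.Println("getter", i) },
	func(i int) { fmt.Println("putter", i) },
)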

vendor/gopkg.in/redis.v5/internal/pool/pool.go generated vendored Normal file

@@ -0,0 +1,354 @@
package pool
import (
"errors"
"fmt"
"net"
"sync"
"sync/atomic"
"time"
"gopkg.in/redis.v5/internal"
)
var (
ErrClosed = errors.New("redis: client is closed")
ErrPoolTimeout = errors.New("redis: connection pool timeout")
errConnStale = errors.New("connection is stale")
)
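// timers caches stopped *time.Timer values so that Get and PopFree can
// bound their waits without allocating a fresh timer on every call.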
var timers = sync.Pool{
New: func() interface{} {
t := time.NewTimer(time.Hour)
t.Stop()
return t
},
}
// Stats contains pool state information and accumulated stats.
type Stats struct {
Requests uint32 // number of times a connection was requested from the pool
Hits uint32 // number of times a free connection was found in the pool
Timeouts uint32 // number of times a wait timeout occurred
TotalConns uint32 // total number of connections in the pool
FreeConns uint32 // number of free connections in the pool
}
type Pooler interface {
Get() (*Conn, bool, error)
Put(*Conn) error
Remove(*Conn, error) error
Len() int
FreeLen() int
Stats() *Stats
Close() error
}
type dialer func() (net.Conn, error)
type ConnPool struct {
dial dialer
OnClose func(*Conn) error
poolTimeout time.Duration
idleTimeout time.Duration
queue chan struct{}
connsMu sync.Mutex
conns []*Conn
freeConnsMu sync.Mutex
freeConns []*Conn
stats Stats
_closed int32 // atomic
lastErr atomic.Value
}
var _ Pooler = (*ConnPool)(nil)
func NewConnPool(dial dialer, poolSize int, poolTimeout, idleTimeout, idleCheckFrequency time.Duration) *ConnPool {
p := &ConnPool{
dial: dial,
poolTimeout: poolTimeout,
idleTimeout: idleTimeout,
queue: make(chan struct{}, poolSize),
conns: make([]*Conn, 0, poolSize),
freeConns: make([]*Conn, 0, poolSize),
}
if idleTimeout > 0 && idleCheckFrequency > 0 {
go p.reaper(idleCheckFrequency)
}
return p
}
func (p *ConnPool) NewConn() (*Conn, error) {
netConn, err := p.dial()
if err != nil {
return nil, err
}
return NewConn(netConn), nil
}
func (p *ConnPool) PopFree() *Conn {
timer := timers.Get().(*time.Timer)
timer.Reset(p.poolTimeout)
select {
case p.queue <- struct{}{}:
if !timer.Stop() {
<-timer.C
}
timers.Put(timer)
case <-timer.C:
timers.Put(timer)
atomic.AddUint32(&p.stats.Timeouts, 1)
return nil
}
p.freeConnsMu.Lock()
cn := p.popFree()
p.freeConnsMu.Unlock()
if cn == nil {
<-p.queue
}
return cn
}
func (p *ConnPool) popFree() *Conn {
if len(p.freeConns) == 0 {
return nil
}
idx := len(p.freeConns) - 1
cn := p.freeConns[idx]
p.freeConns = p.freeConns[:idx]
return cn
}
// Get returns an existing connection from the pool or creates a new one.
func (p *ConnPool) Get() (*Conn, bool, error) {
if p.closed() {
return nil, false, ErrClosed
}
atomic.AddUint32(&p.stats.Requests, 1)
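// Reserve a slot in the queue channel, which acts as a counting
// semaphore capping checked-out connections at poolSize. The pooled
// timer bounds the wait to poolTimeout.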
timer := timers.Get().(*time.Timer)
timer.Reset(p.poolTimeout)
select {
case p.queue <- struct{}{}:
if !timer.Stop() {
<-timer.C
}
timers.Put(timer)
case <-timer.C:
timers.Put(timer)
atomic.AddUint32(&p.stats.Timeouts, 1)
return nil, false, ErrPoolTimeout
}
for {
p.freeConnsMu.Lock()
cn := p.popFree()
p.freeConnsMu.Unlock()
if cn == nil {
break
}
if cn.IsStale(p.idleTimeout) {
p.remove(cn, errConnStale)
continue
}
atomic.AddUint32(&p.stats.Hits, 1)
return cn, false, nil
}
newcn, err := p.NewConn()
if err != nil {
<-p.queue
return nil, false, err
}
p.connsMu.Lock()
p.conns = append(p.conns, newcn)
p.connsMu.Unlock()
return newcn, true, nil
}
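// Put returns a connection to the free list. A connection with unread
// buffered data is removed instead, since its protocol state is unknown.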
func (p *ConnPool) Put(cn *Conn) error {
if data := cn.Rd.PeekBuffered(); data != nil {
err := fmt.Errorf("connection has unread data: %q", data)
internal.Logf("%s", err) // not err.Error() directly: the data may contain % verbs
return p.Remove(cn, err)
}
p.freeConnsMu.Lock()
p.freeConns = append(p.freeConns, cn)
p.freeConnsMu.Unlock()
<-p.queue
return nil
}
func (p *ConnPool) Remove(cn *Conn, reason error) error {
p.remove(cn, reason)
<-p.queue
return nil
}
func (p *ConnPool) remove(cn *Conn, reason error) {
_ = p.closeConn(cn, reason)
p.connsMu.Lock()
for i, c := range p.conns {
if c == cn {
p.conns = append(p.conns[:i], p.conns[i+1:]...)
break
}
}
p.connsMu.Unlock()
}
// Len returns the total number of connections.
func (p *ConnPool) Len() int {
p.connsMu.Lock()
l := len(p.conns)
p.connsMu.Unlock()
return l
}
// FreeLen returns the number of free connections.
func (p *ConnPool) FreeLen() int {
p.freeConnsMu.Lock()
l := len(p.freeConns)
p.freeConnsMu.Unlock()
return l
}
func (p *ConnPool) Stats() *Stats {
return &Stats{
Requests: atomic.LoadUint32(&p.stats.Requests),
Hits: atomic.LoadUint32(&p.stats.Hits),
Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
TotalConns: uint32(p.Len()),
FreeConns: uint32(p.FreeLen()),
}
}
func (p *ConnPool) closed() bool {
return atomic.LoadInt32(&p._closed) == 1
}
func (p *ConnPool) Close() error {
if !atomic.CompareAndSwapInt32(&p._closed, 0, 1) {
return ErrClosed
}
p.connsMu.Lock()
var firstErr error
for _, cn := range p.conns {
if cn == nil {
continue
}
if err := p.closeConn(cn, ErrClosed); err != nil && firstErr == nil {
firstErr = err
}
}
p.conns = nil
p.connsMu.Unlock()
p.freeConnsMu.Lock()
p.freeConns = nil
p.freeConnsMu.Unlock()
return firstErr
}
func (p *ConnPool) closeConn(cn *Conn, reason error) error {
if p.OnClose != nil {
_ = p.OnClose(cn)
}
return cn.Close()
}
func (p *ConnPool) reapStaleConn() bool {
if len(p.freeConns) == 0 {
return false
}
cn := p.freeConns[0]
if !cn.IsStale(p.idleTimeout) {
return false
}
p.remove(cn, errConnStale)
p.freeConns = append(p.freeConns[:0], p.freeConns[1:]...)
return true
}
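// ReapStaleConns removes idle connections one at a time, taking a queue
// slot for each removal so it never races with Get.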
func (p *ConnPool) ReapStaleConns() (int, error) {
var n int
for {
p.queue <- struct{}{}
p.freeConnsMu.Lock()
reaped := p.reapStaleConn()
p.freeConnsMu.Unlock()
<-p.queue
if reaped {
n++
} else {
break
}
}
return n, nil
}
func (p *ConnPool) reaper(frequency time.Duration) {
ticker := time.NewTicker(frequency)
defer ticker.Stop()
for range ticker.C {
if p.closed() {
break
}
n, err := p.ReapStaleConns()
if err != nil {
internal.Logf("ReapStaleConns failed: %s", err)
continue
}
s := p.Stats()
internal.Logf(
"reaper: removed %d stale conns (TotalConns=%d FreeConns=%d Requests=%d Hits=%d Timeouts=%d)",
n, s.TotalConns, s.FreeConns, s.Requests, s.Hits, s.Timeouts,
)
}
}
//------------------------------------------------------------------------------
var idleCheckFrequency atomic.Value
func SetIdleCheckFrequency(d time.Duration) {
idleCheckFrequency.Store(d)
}
func getIdleCheckFrequency() time.Duration {
v := idleCheckFrequency.Load()
if v == nil {
return time.Minute
}
return v.(time.Duration)
}
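
NewConnPool, Get, Put, and Remove together form the checkout/checkin life cycle sketched above. A minimal sketch, assuming code compiled inside the gopkg.in/redis.v5 module (the package is internal) and a Redis server listening on localhost:6379:

package main

import (
	"log"
	"net"
	"time"

	"gopkg.in/redis.v5/internal/pool"
)

func main() {
	connPool := pool.NewConnPool(
		func() (net.Conn, error) { return net.Dial("tcp", "localhost:6379") },
		10,          // pool size: capacity of the queue semaphore
		time.Second, // poolTimeout: how long Get may block when all conns are busy
		time.Minute, // idleTimeout: conns unused this long count as stale
		time.Minute, // idleCheckFrequency: how often the background reaper runs
	)
	defer connPool.Close()

	cn, isNew, err := connPool.Get()
	if err != nil {
		log.Fatal(err)
	}
	_ = isNew // true only when the connection was freshly dialed
	// ... write a command via cn.Wb, read the reply via cn.Rd ...
	if err := connPool.Put(cn); err != nil { // back onto the free list
		log.Fatal(err)
	}
}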

vendor/gopkg.in/redis.v5/internal/pool/pool_single.go generated vendored Normal file

@@ -0,0 +1,47 @@
package pool
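// SingleConnPool adapts a single, already-obtained connection to the
// Pooler interface: Get always returns that connection, and Close is a
// no-op because the pool does not own it.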
type SingleConnPool struct {
cn *Conn
}
var _ Pooler = (*SingleConnPool)(nil)
func NewSingleConnPool(cn *Conn) *SingleConnPool {
return &SingleConnPool{
cn: cn,
}
}
func (p *SingleConnPool) Get() (*Conn, bool, error) {
return p.cn, false, nil
}
func (p *SingleConnPool) Put(cn *Conn) error {
if p.cn != cn {
panic("p.cn != cn")
}
return nil
}
func (p *SingleConnPool) Remove(cn *Conn, _ error) error {
if p.cn != cn {
panic("p.cn != cn")
}
return nil
}
func (p *SingleConnPool) Len() int {
return 1
}
func (p *SingleConnPool) FreeLen() int {
return 0
}
func (p *SingleConnPool) Stats() *Stats {
return nil
}
func (p *SingleConnPool) Close() error {
return nil
}
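
This is what lets code written against Pooler operate on one pinned connection, for example while a transaction must stay on a single conn. A hedged fragment, continuing the pool.go sketch above:

cn, _, err := connPool.Get() // check a conn out of the real pool
if err != nil {
	log.Fatal(err)
}
single := pool.NewSingleConnPool(cn)
same, _, _ := single.Get() // always cn, never blocks
_ = same                   // same == cn
// single.Close() releases nothing; the caller still owns cn and must
// hand it back with connPool.Put(cn) (or Remove on error).
if err := connPool.Put(cn); err != nil {
	log.Fatal(err)
}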

vendor/gopkg.in/redis.v5/internal/pool/pool_sticky.go generated vendored Normal file

@@ -0,0 +1,119 @@
package pool
import (
"errors"
"sync"
)
type StickyConnPool struct {
pool *ConnPool
reusable bool
cn *Conn
closed bool
mu sync.Mutex
}
var _ Pooler = (*StickyConnPool)(nil)
func NewStickyConnPool(pool *ConnPool, reusable bool) *StickyConnPool {
return &StickyConnPool{
pool: pool,
reusable: reusable,
}
}
func (p *StickyConnPool) Get() (*Conn, bool, error) {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return nil, false, ErrClosed
}
if p.cn != nil {
return p.cn, false, nil
}
cn, _, err := p.pool.Get()
if err != nil {
return nil, false, err
}
p.cn = cn
return cn, true, nil
}
func (p *StickyConnPool) putUpstream() (err error) {
err = p.pool.Put(p.cn)
p.cn = nil
return err
}
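// Put is deliberately a no-op while the pool is open: the sticky pool
// keeps its one connection checked out until Close, which returns it
// upstream only if the pool was marked reusable.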
func (p *StickyConnPool) Put(cn *Conn) error {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return ErrClosed
}
return nil
}
func (p *StickyConnPool) removeUpstream(reason error) error {
err := p.pool.Remove(p.cn, reason)
p.cn = nil
return err
}
func (p *StickyConnPool) Remove(cn *Conn, reason error) error {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return nil
}
return p.removeUpstream(reason)
}
func (p *StickyConnPool) Len() int {
p.mu.Lock()
defer p.mu.Unlock()
if p.cn == nil {
return 0
}
return 1
}
func (p *StickyConnPool) FreeLen() int {
p.mu.Lock()
defer p.mu.Unlock()
if p.cn == nil {
return 1
}
return 0
}
func (p *StickyConnPool) Stats() *Stats {
return nil
}
func (p *StickyConnPool) Close() error {
p.mu.Lock()
defer p.mu.Unlock()
if p.closed {
return ErrClosed
}
p.closed = true
var err error
if p.cn != nil {
if p.reusable {
err = p.putUpstream()
} else {
reason := errors.New("redis: unreusable sticky connection")
err = p.removeUpstream(reason)
}
}
return err
}
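
StickyConnPool covers the inverse case: the first Get checks a connection out of the parent pool and pins it, and Close either returns it (reusable) or discards it. A brief fragment under the same assumptions as the earlier sketches:

sticky := pool.NewStickyConnPool(connPool, true) // reusable
cn1, _, _ := sticky.Get() // checks a conn out of connPool and pins it
cn2, _, _ := sticky.Get() // same connection; no second checkout
_ = cn1 == cn2            // true
_ = sticky.Close()        // reusable, so cn1 is Put back into connPool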

vendor/gopkg.in/redis.v5/internal/pool/pool_test.go generated vendored Normal file

@@ -0,0 +1,247 @@
package pool_test
import (
"errors"
"testing"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"gopkg.in/redis.v5/internal/pool"
)
var _ = Describe("ConnPool", func() {
var connPool *pool.ConnPool
BeforeEach(func() {
connPool = pool.NewConnPool(
dummyDialer, 10, time.Hour, time.Millisecond, time.Millisecond)
})
AfterEach(func() {
connPool.Close()
})
It("should unblock client when conn is removed", func() {
// Reserve one connection.
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
// Reserve all other connections.
var cns []*pool.Conn
for i := 0; i < 9; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
cns = append(cns, cn)
}
started := make(chan bool, 1)
done := make(chan bool, 1)
go func() {
defer GinkgoRecover()
started <- true
_, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
done <- true
err = connPool.Put(cn)
Expect(err).NotTo(HaveOccurred())
}()
<-started
// Check that Get is blocked.
select {
case <-done:
Fail("Get is not blocked")
default:
// ok
}
err = connPool.Remove(cn, errors.New("test"))
Expect(err).NotTo(HaveOccurred())
// Check that Get is unblocked.
select {
case <-done:
// ok
case <-time.After(time.Second):
Fail("Get is not unblocked")
}
for _, cn := range cns {
err = connPool.Put(cn)
Expect(err).NotTo(HaveOccurred())
}
})
})
var _ = Describe("conns reaper", func() {
const idleTimeout = time.Minute
var connPool *pool.ConnPool
var conns, idleConns, closedConns []*pool.Conn
BeforeEach(func() {
connPool = pool.NewConnPool(
dummyDialer, 10, time.Second, idleTimeout, time.Hour)
closedConns = nil
connPool.OnClose = func(cn *pool.Conn) error {
closedConns = append(closedConns, cn)
return nil
}
conns = nil
// add stale connections
idleConns = nil
for i := 0; i < 3; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
cn.SetUsedAt(time.Now().Add(-2 * idleTimeout))
conns = append(conns, cn)
idleConns = append(idleConns, cn)
}
// add fresh connections
for i := 0; i < 3; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
conns = append(conns, cn)
}
for _, cn := range conns {
Expect(connPool.Put(cn)).NotTo(HaveOccurred())
}
Expect(connPool.Len()).To(Equal(6))
Expect(connPool.FreeLen()).To(Equal(6))
n, err := connPool.ReapStaleConns()
Expect(err).NotTo(HaveOccurred())
Expect(n).To(Equal(3))
})
AfterEach(func() {
_ = connPool.Close()
Expect(connPool.Len()).To(Equal(0))
Expect(connPool.FreeLen()).To(Equal(0))
Expect(len(closedConns)).To(Equal(len(conns)))
Expect(closedConns).To(ConsistOf(conns))
})
It("reaps stale connections", func() {
Expect(connPool.Len()).To(Equal(3))
Expect(connPool.FreeLen()).To(Equal(3))
})
It("does not reap fresh connections", func() {
n, err := connPool.ReapStaleConns()
Expect(err).NotTo(HaveOccurred())
Expect(n).To(Equal(0))
})
It("stale connections are closed", func() {
Expect(len(closedConns)).To(Equal(len(idleConns)))
Expect(closedConns).To(ConsistOf(idleConns))
})
It("pool is functional", func() {
for j := 0; j < 3; j++ {
var freeCns []*pool.Conn
for i := 0; i < 3; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
Expect(cn).NotTo(BeNil())
freeCns = append(freeCns, cn)
}
Expect(connPool.Len()).To(Equal(3))
Expect(connPool.FreeLen()).To(Equal(0))
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
Expect(cn).NotTo(BeNil())
conns = append(conns, cn)
Expect(connPool.Len()).To(Equal(4))
Expect(connPool.FreeLen()).To(Equal(0))
err = connPool.Remove(cn, errors.New("test"))
Expect(err).NotTo(HaveOccurred())
Expect(connPool.Len()).To(Equal(3))
Expect(connPool.FreeLen()).To(Equal(0))
for _, cn := range freeCns {
err := connPool.Put(cn)
Expect(err).NotTo(HaveOccurred())
}
Expect(connPool.Len()).To(Equal(3))
Expect(connPool.FreeLen()).To(Equal(3))
}
})
})
var _ = Describe("race", func() {
var connPool *pool.ConnPool
var C, N int
BeforeEach(func() {
C, N = 10, 1000
if testing.Short() {
C = 4
N = 100
}
})
AfterEach(func() {
connPool.Close()
})
It("does not happen on Get, Put, and Remove", func() {
connPool = pool.NewConnPool(
dummyDialer, 10, time.Minute, time.Millisecond, time.Millisecond)
perform(C, func(id int) {
for i := 0; i < N; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
if err == nil {
Expect(connPool.Put(cn)).NotTo(HaveOccurred())
}
}
}, func(id int) {
for i := 0; i < N; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
if err == nil {
Expect(connPool.Remove(cn, errors.New("test"))).NotTo(HaveOccurred())
}
}
})
})
It("does not happen on Get and PopFree", func() {
connPool = pool.NewConnPool(
dummyDialer, 10, time.Minute, time.Second, time.Millisecond)
perform(C, func(id int) {
for i := 0; i < N; i++ {
cn, _, err := connPool.Get()
Expect(err).NotTo(HaveOccurred())
if err == nil {
Expect(connPool.Put(cn)).NotTo(HaveOccurred())
}
cn = connPool.PopFree()
if cn != nil {
Expect(connPool.Put(cn)).NotTo(HaveOccurred())
}
}
})
})
})
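
The race specs above are only meaningful with the race detector enabled; under the vendored layout they would run via something like go test -race ./internal/pool/. Note that under go test -short the workload drops from 10 goroutines x 1000 iterations to 4 x 100 to keep the run fast.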