device: use container/list instead of open coding it

This linked list implementation is awful, but maybe Go 2 will help
eventually, and at least we're not open coding the hlist any more.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Author: Jason A. Donenfeld <Jason@zx2c4.com>
Date: 2021-02-10 18:19:11 +01:00
parent 747f5440bc
commit 75e6d810ed
2 changed files with 25 additions and 37 deletions
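
For context, the stdlib pattern the diff below adopts: container/list.PushBack returns a *list.Element handle that the caller stores for O(1) removal later, iteration walks Front()/Next(), and Element.Value is an interface{} that must be type-asserted back out, which is the boilerplate the commit message hopes "Go 2" (generics) will eventually remove. A minimal, self-contained sketch; the item type is hypothetical and not WireGuard code:

package main

import (
	"container/list"
	"fmt"
)

type item struct {
	name string
	elem *list.Element // handle kept for O(1) removal, like perPeerElem in the diff
}

func main() {
	var items list.List // the zero value is an empty list, ready to use

	// Insert: PushBack returns the element, which the item remembers.
	a := &item{name: "a"}
	a.elem = items.PushBack(a)
	b := &item{name: "b"}
	b.elem = items.PushBack(b)

	// Remove: O(1) through the stored handle, no traversal needed.
	items.Remove(a.elem)
	a.elem = nil

	// Iterate: Value is an interface{}, so callers must type-assert;
	// this is the awkwardness the commit message complains about.
	for e := items.Front(); e != nil; e = e.Next() {
		fmt.Println(e.Value.(*item).name) // prints "b"
	}
}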

device/allowedips.go

@@ -6,6 +6,7 @@
 package device
 
 import (
+	"container/list"
 	"errors"
 	"math/bits"
 	"net"
@@ -14,14 +15,13 @@ import (
 )
 
 type trieEntry struct {
 	child        [2]*trieEntry
 	peer         *Peer
 	bits         net.IP
 	cidr         uint
 	bit_at_byte  uint
 	bit_at_shift uint
-	nextEntryForPeer  *trieEntry
-	pprevEntryForPeer **trieEntry
+	perPeerElem  *list.Element
 }
 
 func isLittleEndian() bool {
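
The struct change above collapses the two intrusive hlist fields, a next pointer plus a pointer to the link that points at us (the trick that gives O(1) unlink without a prev pointer), into one element handle. A rough sketch of the two shapes, with hypothetical names and container/list assumed imported:

// Before: intrusive, open-coded hlist links carried by every node.
type nodeBefore struct {
	next  *nodeBefore  // cf. nextEntryForPeer
	pprev **nodeBefore // cf. pprevEntryForPeer: address of the link pointing at this node
}

// After: the list owns the bookkeeping; the node keeps only a handle.
type nodeAfter struct {
	elem *list.Element // cf. perPeerElem, kept for O(1) removal
}
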
@@ -69,28 +69,14 @@ func commonBits(ip1 net.IP, ip2 net.IP) uint {
 }
 
 func (node *trieEntry) addToPeerEntries() {
-	p := node.peer
-	first := p.firstTrieEntry
-	node.nextEntryForPeer = first
-	if first != nil {
-		first.pprevEntryForPeer = &node.nextEntryForPeer
-	}
-	p.firstTrieEntry = node
-	node.pprevEntryForPeer = &p.firstTrieEntry
+	node.perPeerElem = node.peer.trieEntries.PushBack(node)
 }
 
 func (node *trieEntry) removeFromPeerEntries() {
-	if node.pprevEntryForPeer == nil {
-		return
+	if node.perPeerElem != nil {
+		node.peer.trieEntries.Remove(node.perPeerElem)
+		node.perPeerElem = nil
 	}
-	next := node.nextEntryForPeer
-	pprev := node.pprevEntryForPeer
-	*pprev = next
-	if next != nil {
-		next.pprevEntryForPeer = pprev
-	}
-	node.nextEntryForPeer = nil
-	node.pprevEntryForPeer = nil
 }
 
 func (node *trieEntry) removeByPeer(p *Peer) *trieEntry {
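
Note the rewritten removeFromPeerEntries stays idempotent: the nil check plus clearing the handle make a second call, or a call on a never-inserted entry, a harmless no-op. The same guard expressed against the hypothetical item type from the sketch further up:

// detach unlinks it from l at most once; elem doubles as the
// "currently linked" flag, so repeated calls do nothing.
func (it *item) detach(l *list.List) {
	if it.elem != nil {
		l.Remove(it.elem)
		it.elem = nil
	}
}
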
@@ -226,7 +212,8 @@ func (table *AllowedIPs) EntriesForPeer(peer *Peer, cb func(ip net.IP, cidr uint) bool) {
 	table.mutex.RLock()
 	defer table.mutex.RUnlock()
 
-	for node := peer.firstTrieEntry; node != nil; node = node.nextEntryForPeer {
+	for elem := peer.trieEntries.Front(); elem != nil; elem = elem.Next() {
+		node := elem.Value.(*trieEntry)
 		if !cb(node.bits, node.cidr) {
 			return
 		}
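
The callback contract of EntriesForPeer is unchanged by this commit: returning false from cb stops the walk, which the loop implements with the early return. A hedged caller sketch; dumpAllowedIPs is a hypothetical helper, assumed to live in the device package with fmt and net imported:

func dumpAllowedIPs(table *AllowedIPs, peer *Peer) {
	table.EntriesForPeer(peer, func(ip net.IP, cidr uint) bool {
		fmt.Printf("%s/%d\n", ip, cidr)
		return true // return false here to stop iterating early
	})
}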

device/peer.go

@@ -6,6 +6,7 @@
 package device
 
 import (
+	"container/list"
 	"encoding/base64"
 	"errors"
 	"fmt"
@@ -17,15 +18,13 @@ import (
 )
 
 type Peer struct {
 	isRunning    AtomicBool
 	sync.RWMutex // Mostly protects endpoint, but is generally taken whenever we modify peer
 	keypairs     Keypairs
 	handshake    Handshake
 	device       *Device
 	endpoint     conn.Endpoint
-	persistentKeepaliveInterval uint32 // accessed atomically
-	firstTrieEntry              *trieEntry
-	stopping                    sync.WaitGroup // routines pending stop
+	stopping     sync.WaitGroup // routines pending stop
 
 	// These fields are accessed with atomic operations, which must be
 	// 64-bit aligned even on 32-bit platforms. Go guarantees that an
@@ -61,7 +60,9 @@ type Peer struct {
 		inbound  *autodrainingInboundQueue // sequential ordering of tun writing
 	}
 
 	cookieGenerator CookieGenerator
+	trieEntries     list.List
+	persistentKeepaliveInterval uint32 // accessed atomically
 }
 
 func (device *Device) NewPeer(pk NoisePublicKey) (*Peer, error) {
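
One reason trieEntries can be a plain list.List field rather than a pointer: container/list documents that the zero value of List is an empty list ready to use, so NewPeer needs no explicit initialization for it. A runnable sketch, with peerish as a hypothetical stand-in for Peer:

package main

import (
	"container/list"
	"fmt"
)

type peerish struct {
	trieEntries list.List // zero value works; no list.New() required
}

func main() {
	var p peerish
	p.trieEntries.PushBack("entry")
	fmt.Println(p.trieEntries.Len()) // prints 1
}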