/* SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2017-2023 WireGuard LLC. All Rights Reserved.
 */

package device

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io"
	"math/rand"
	"net/netip"
	"os"
	"runtime"
	"runtime/pprof"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"gitea.hbanafa.com/hesham/wireguard-go/conn"
	"gitea.hbanafa.com/hesham/wireguard-go/conn/bindtest"
	"gitea.hbanafa.com/hesham/wireguard-go/tun"
	"gitea.hbanafa.com/hesham/wireguard-go/tun/tuntest"
)

// uapiCfg returns a string that contains cfg formatted for use with IpcSet.
// cfg is a series of alternating key/value strings.
// uapiCfg exists because editors and humans like to insert
// whitespace into configs, which can cause failures, some of which are silent.
// For example, a leading blank newline causes the remainder
// of the config to be silently ignored.
func uapiCfg(cfg ...string) string {
	if len(cfg)%2 != 0 {
		panic("odd number of args to uapiCfg")
	}
	buf := new(bytes.Buffer)
	for i, s := range cfg {
		buf.WriteString(s)
		sep := byte('\n')
		if i%2 == 0 {
			sep = '='
		}
		buf.WriteByte(sep)
	}
	return buf.String()
}
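
// TestUapiCfgFormat is an illustrative sketch added alongside uapiCfg (it is
// not part of the upstream test suite): it pins down the key '=' value '\n'
// encoding produced above, which is the shape IpcSet expects, with no stray
// leading whitespace.
func TestUapiCfgFormat(t *testing.T) {
	got := uapiCfg(
		"listen_port", "0",
		"replace_peers", "true",
	)
	want := "listen_port=0\nreplace_peers=true\n"
	if got != want {
		t.Errorf("uapiCfg() = %q, want %q", got, want)
	}
}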

// genConfigs generates a pair of configs that connect to each other.
// Each config uses listen_port 0, so the kernel assigns a free port;
// the returned endpointCfgs contain a %d placeholder that is filled in
// with the actual port once the corresponding device is up.
func genConfigs(tb testing.TB) (cfgs, endpointCfgs [2]string) {
	var key1, key2 NoisePrivateKey
	_, err := rand.Read(key1[:])
	if err != nil {
		tb.Errorf("unable to generate private key random bytes: %v", err)
	}
	_, err = rand.Read(key2[:])
	if err != nil {
		tb.Errorf("unable to generate private key random bytes: %v", err)
	}
	pub1, pub2 := key1.publicKey(), key2.publicKey()

	cfgs[0] = uapiCfg(
		"private_key", hex.EncodeToString(key1[:]),
		"listen_port", "0",
		"replace_peers", "true",
		"public_key", hex.EncodeToString(pub2[:]),
		"protocol_version", "1",
		"replace_allowed_ips", "true",
		"allowed_ip", "1.0.0.2/32",
	)
	endpointCfgs[0] = uapiCfg(
		"public_key", hex.EncodeToString(pub2[:]),
		"endpoint", "127.0.0.1:%d",
	)
	cfgs[1] = uapiCfg(
		"private_key", hex.EncodeToString(key2[:]),
		"listen_port", "0",
		"replace_peers", "true",
		"public_key", hex.EncodeToString(pub1[:]),
		"protocol_version", "1",
		"replace_allowed_ips", "true",
		"allowed_ip", "1.0.0.1/32",
	)
	endpointCfgs[1] = uapiCfg(
		"public_key", hex.EncodeToString(pub1[:]),
		"endpoint", "127.0.0.1:%d",
	)
	return
}
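
// Illustrative sketch of how the configs above are meant to be applied (the
// real flow is in genTestPair below; dev0Port here is a placeholder name):
//
//	cfgs, endpointCfgs := genConfigs(tb)
//	// IpcSet(cfgs[i]) and Up() each device so the kernel assigns its port,
//	// then resolve the peer's "%d" endpoint placeholder:
//	endpointCfgs[1] = fmt.Sprintf(endpointCfgs[1], dev0Port)
//	// finally IpcSet(endpointCfgs[i]) on each device.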

// A testPair is a pair of testPeers.
type testPair [2]testPeer

// A testPeer is a peer used for testing.
type testPeer struct {
	tun *tuntest.ChannelTUN
	dev *Device
	ip  netip.Addr
}

type SendDirection bool

const (
	Ping SendDirection = true
	Pong SendDirection = false
)

func (d SendDirection) String() string {
	if d == Ping {
		return "ping"
	}
	return "pong"
}

func (pair *testPair) Send(tb testing.TB, ping SendDirection, done chan struct{}) {
	tb.Helper()
	p0, p1 := pair[0], pair[1]
	if !ping {
		// pong is the new ping
		p0, p1 = p1, p0
	}
	msg := tuntest.Ping(p0.ip, p1.ip)
	p1.tun.Outbound <- msg
	timer := time.NewTimer(5 * time.Second)
	defer timer.Stop()
	var err error
	select {
	case msgRecv := <-p0.tun.Inbound:
		if !bytes.Equal(msg, msgRecv) {
			err = fmt.Errorf("%s did not transit correctly", ping)
		}
	case <-timer.C:
		err = fmt.Errorf("%s did not transit", ping)
	case <-done:
	}
	if err != nil {
		// The error may have occurred because the test is done.
		select {
		case <-done:
			return
		default:
		}
		// Real error.
		tb.Error(err)
	}
}

// genTestPair creates a testPair.
func genTestPair(tb testing.TB, realSocket bool) (pair testPair) {
	cfg, endpointCfg := genConfigs(tb)
	var binds [2]conn.Bind
	if realSocket {
		binds[0], binds[1] = conn.NewDefaultBind(), conn.NewDefaultBind()
	} else {
		binds = bindtest.NewChannelBinds()
	}
	// Bring up a ChannelTUN for each config.
	for i := range pair {
		p := &pair[i]
		p.tun = tuntest.NewChannelTUN()
		p.ip = netip.AddrFrom4([4]byte{1, 0, 0, byte(i + 1)})
		level := LogLevelVerbose
		if _, ok := tb.(*testing.B); ok && !testing.Verbose() {
			level = LogLevelError
		}
		p.dev = NewDevice(p.tun.TUN(), binds[i], NewLogger(level, fmt.Sprintf("dev%d: ", i)))
		if err := p.dev.IpcSet(cfg[i]); err != nil {
			tb.Errorf("failed to configure device %d: %v", i, err)
			p.dev.Close()
			continue
		}
		if err := p.dev.Up(); err != nil {
			tb.Errorf("failed to bring up device %d: %v", i, err)
			p.dev.Close()
			continue
		}
		endpointCfg[i^1] = fmt.Sprintf(endpointCfg[i^1], p.dev.net.port)
	}
	for i := range pair {
		p := &pair[i]
		if err := p.dev.IpcSet(endpointCfg[i]); err != nil {
			tb.Errorf("failed to configure device endpoint %d: %v", i, err)
			p.dev.Close()
			continue
		}
		// The device is ready. Close it when the test completes.
		tb.Cleanup(p.dev.Close)
	}
	return
}

func TestTwoDevicePing(t *testing.T) {
	goroutineLeakCheck(t)
	pair := genTestPair(t, true)
	t.Run("ping 1.0.0.1", func(t *testing.T) {
		pair.Send(t, Ping, nil)
	})
	t.Run("ping 1.0.0.2", func(t *testing.T) {
		pair.Send(t, Pong, nil)
	})
}

func TestUpDown(t *testing.T) {
	goroutineLeakCheck(t)
	const itrials = 50
	const otrials = 10

	for n := 0; n < otrials; n++ {
		pair := genTestPair(t, false)
		for i := range pair {
			for k := range pair[i].dev.peers.keyMap {
				pair[i].dev.IpcSet(fmt.Sprintf("public_key=%s\npersistent_keepalive_interval=1\n", hex.EncodeToString(k[:])))
			}
		}
		var wg sync.WaitGroup
		wg.Add(len(pair))
		for i := range pair {
			go func(d *Device) {
				defer wg.Done()
				for i := 0; i < itrials; i++ {
					if err := d.Up(); err != nil {
						t.Errorf("failed to bring up device: %v", err)
					}
					time.Sleep(time.Duration(rand.Intn(int(time.Nanosecond * (0x10000 - 1)))))
					if err := d.Down(); err != nil {
						t.Errorf("failed to bring down device: %v", err)
					}
					time.Sleep(time.Duration(rand.Intn(int(time.Nanosecond * (0x10000 - 1)))))
				}
			}(pair[i].dev)
		}
		wg.Wait()
		for i := range pair {
			pair[i].dev.Up()
			pair[i].dev.Close()
		}
	}
}

// TestConcurrencySafety does other things concurrently with tunnel use.
// It is intended to be used with the race detector to catch data races.
func TestConcurrencySafety(t *testing.T) {
	pair := genTestPair(t, true)
	done := make(chan struct{})

	const warmupIters = 10
	var warmup sync.WaitGroup
	warmup.Add(warmupIters)
	go func() {
		// Send data continuously back and forth until we're done.
		// Note that we may continue to attempt to send data
		// even after done is closed.
		i := warmupIters
		for ping := Ping; ; ping = !ping {
			pair.Send(t, ping, done)
			select {
			case <-done:
				return
			default:
			}
			if i > 0 {
				warmup.Done()
				i--
			}
		}
	}()
	warmup.Wait()

	applyCfg := func(cfg string) {
		err := pair[0].dev.IpcSet(cfg)
		if err != nil {
			t.Fatal(err)
		}
	}

	// Change persistent_keepalive_interval concurrently with tunnel use.
	t.Run("persistentKeepaliveInterval", func(t *testing.T) {
		var pub NoisePublicKey
		for key := range pair[0].dev.peers.keyMap {
			pub = key
			break
		}
		cfg := uapiCfg(
			"public_key", hex.EncodeToString(pub[:]),
			"persistent_keepalive_interval", "1",
		)
		for i := 0; i < 1000; i++ {
			applyCfg(cfg)
		}
	})

	// Change private keys concurrently with tunnel use.
	t.Run("privateKey", func(t *testing.T) {
		bad := uapiCfg("private_key", "7777777777777777777777777777777777777777777777777777777777777777")
		good := uapiCfg("private_key", hex.EncodeToString(pair[0].dev.staticIdentity.privateKey[:]))
		// Set iters to a large number like 1000 to flush out data races quickly.
		// Don't leave it large. That can cause logical races
		// in which the handshake is interleaved with key changes
		// such that the private key appears to be unchanging but
		// other state gets reset, which can cause handshake failures like
		// "Received packet with invalid mac1".
		const iters = 1
		for i := 0; i < iters; i++ {
			applyCfg(bad)
			applyCfg(good)
		}
	})

	// Perform bind updates and keepalive sends concurrently with tunnel use.
	t.Run("bindUpdate and keepalive", func(t *testing.T) {
		const iters = 10
		for i := 0; i < iters; i++ {
			for _, peer := range pair {
				peer.dev.BindUpdate()
				peer.dev.SendKeepalivesToPeersWithCurrentKeypair()
			}
		}
	})

	close(done)
}

func BenchmarkLatency(b *testing.B) {
	pair := genTestPair(b, true)

	// Establish a connection.
	pair.Send(b, Ping, nil)
	pair.Send(b, Pong, nil)

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		pair.Send(b, Ping, nil)
		pair.Send(b, Pong, nil)
	}
}

func BenchmarkThroughput(b *testing.B) {
	pair := genTestPair(b, true)

	// Establish a connection.
	pair.Send(b, Ping, nil)
	pair.Send(b, Pong, nil)

	// Measure how long it takes to receive b.N packets,
	// starting when we receive the first packet.
	var recv atomic.Uint64
	var elapsed time.Duration
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		var start time.Time
		for {
			<-pair[0].tun.Inbound
			new := recv.Add(1)
			if new == 1 {
				start = time.Now()
			}
			// Careful! Don't change this to else if; b.N can be equal to 1.
			if new == uint64(b.N) {
				elapsed = time.Since(start)
				return
			}
		}
	}()

	// Send packets as fast as we can until we've received enough.
	ping := tuntest.Ping(pair[0].ip, pair[1].ip)
	pingc := pair[1].tun.Outbound
	var sent uint64
	for recv.Load() != uint64(b.N) {
		sent++
		pingc <- ping
	}
	wg.Wait()

	b.ReportMetric(float64(elapsed)/float64(b.N), "ns/op")
	b.ReportMetric(1-float64(b.N)/float64(sent), "packet-loss")
}

func BenchmarkUAPIGet(b *testing.B) {
	pair := genTestPair(b, true)
	pair.Send(b, Ping, nil)
	pair.Send(b, Pong, nil)
	b.ReportAllocs()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		pair[0].dev.IpcGetOperation(io.Discard)
	}
}

func goroutineLeakCheck(t *testing.T) {
	goroutines := func() (int, []byte) {
		p := pprof.Lookup("goroutine")
		b := new(bytes.Buffer)
		p.WriteTo(b, 1)
		return p.Count(), b.Bytes()
	}

	startGoroutines, startStacks := goroutines()
	t.Cleanup(func() {
		if t.Failed() {
			return
		}
		// Give goroutines time to exit, if they need it.
		for i := 0; i < 10000; i++ {
			if runtime.NumGoroutine() <= startGoroutines {
				return
			}
			time.Sleep(1 * time.Millisecond)
		}
		endGoroutines, endStacks := goroutines()
		t.Logf("starting stacks:\n%s\n", startStacks)
		t.Logf("ending stacks:\n%s\n", endStacks)
		t.Fatalf("expected %d goroutines, got %d, leak?", startGoroutines, endGoroutines)
	})
}

type fakeBindSized struct {
	size int
}

func (b *fakeBindSized) Open(port uint16) (fns []conn.ReceiveFunc, actualPort uint16, err error) {
	return nil, 0, nil
}

func (b *fakeBindSized) Close() error { return nil }

func (b *fakeBindSized) SetMark(mark uint32) error { return nil }

func (b *fakeBindSized) Send(bufs [][]byte, ep conn.Endpoint) error { return nil }

func (b *fakeBindSized) ParseEndpoint(s string) (conn.Endpoint, error) { return nil, nil }

func (b *fakeBindSized) BatchSize() int { return b.size }

type fakeTUNDeviceSized struct {
	size int
}

func (t *fakeTUNDeviceSized) File() *os.File { return nil }

func (t *fakeTUNDeviceSized) Read(bufs [][]byte, sizes []int, offset int) (n int, err error) {
	return 0, nil
}

func (t *fakeTUNDeviceSized) Write(bufs [][]byte, offset int) (int, error) { return 0, nil }

func (t *fakeTUNDeviceSized) MTU() (int, error) { return 0, nil }

func (t *fakeTUNDeviceSized) Name() (string, error) { return "", nil }

func (t *fakeTUNDeviceSized) Events() <-chan tun.Event { return nil }

func (t *fakeTUNDeviceSized) Close() error { return nil }

func (t *fakeTUNDeviceSized) BatchSize() int { return t.size }
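
// expectedBatchSize is an illustrative helper added for clarity (it is not
// part of the upstream tests, and the max-of-the-two rule is an assumption
// read off the cases in TestBatchSize below): the test expects Device.BatchSize
// to report the larger of the bind and TUN batch sizes.
func expectedBatchSize(bindSize, tunSize int) int {
	if bindSize > tunSize {
		return bindSize
	}
	return tunSize
}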

func TestBatchSize(t *testing.T) {
	d := Device{}

	d.net.bind = &fakeBindSized{1}
	d.tun.device = &fakeTUNDeviceSized{1}
	if want, got := 1, d.BatchSize(); got != want {
		t.Errorf("expected batch size %d, got %d", want, got)
	}

	d.net.bind = &fakeBindSized{1}
	d.tun.device = &fakeTUNDeviceSized{128}
	if want, got := 128, d.BatchSize(); got != want {
		t.Errorf("expected batch size %d, got %d", want, got)
	}

	d.net.bind = &fakeBindSized{128}
	d.tun.device = &fakeTUNDeviceSized{1}
	if want, got := 128, d.BatchSize(); got != want {
		t.Errorf("expected batch size %d, got %d", want, got)
	}

	d.net.bind = &fakeBindSized{128}
	d.tun.device = &fakeTUNDeviceSized{128}
	if want, got := 128, d.BatchSize(); got != want {
		t.Errorf("expected batch size %d, got %d", want, got)
	}
}