Key fixes: - SDWAN config: use absolute path /root/.openclaw/workspace/inp2p/sdwan.json - Client: register handlers BEFORE ReadLoop (race condition fix) - Client: make ensureTUNReader non-fatal on error - Client: fix TUN device conflict between ip tuntap add and ioctl - Client: fix panic on empty TUN read (n==0 check) - Build: static binary with -extldflags=-static for glibc compatibility Verified: hcss(10.10.0.3) <-> i-6986(10.10.0.2) ping 5/5, 0% loss, 44ms
730 lines · 19 KiB · Go
// Package client implements the inp2pc P2P client.
|
|
package client
|
|
|
|
import (
|
|
"crypto/tls"
|
|
"fmt"
|
|
"log"
|
|
"net"
|
|
"net/netip"
|
|
"net/url"
|
|
"os"
|
|
"os/exec"
|
|
"runtime"
|
|
"strings"
|
|
"sync"
|
|
"time"
|
|
|
|
"golang.org/x/sys/unix"
|
|
|
|
"github.com/gorilla/websocket"
|
|
"github.com/openp2p-cn/inp2p/pkg/auth"
|
|
"github.com/openp2p-cn/inp2p/pkg/config"
|
|
"github.com/openp2p-cn/inp2p/pkg/nat"
|
|
"github.com/openp2p-cn/inp2p/pkg/protocol"
|
|
"github.com/openp2p-cn/inp2p/pkg/punch"
|
|
"github.com/openp2p-cn/inp2p/pkg/relay"
|
|
"github.com/openp2p-cn/inp2p/pkg/signal"
|
|
"github.com/openp2p-cn/inp2p/pkg/tunnel"
|
|
)
|
|
|
|
// Client is the INP2P client node. It maintains the signaling
// connection, NAT/punch state, per-peer tunnels, and the optional
// SDWAN TUN data plane.
type Client struct {
	cfg config.ClientConfig // static configuration supplied to New

	conn *signal.Conn // signaling connection; set in connectAndRun

	// NAT detection results from the most recent nat.Detect run.
	natType    protocol.NATType
	publicIP   string
	publicPort int
	localPort  int

	tunnels map[string]*tunnel.Tunnel // peerNode → tunnel
	tMu     sync.RWMutex              // guards tunnels

	relayMgr *relay.Manager // non-nil only when cfg.RelayEnabled

	sdwanMu sync.RWMutex         // guards sdwan and sdwanIP
	sdwan   protocol.SDWANConfig // last applied SDWAN config
	sdwanIP string               // this node's virtual IP inside the SDWAN
	// NOTE(review): sdwanStop is never closed anywhere in this file —
	// presumably reserved for SDWAN teardown; confirm before relying on it.
	sdwanStop chan struct{}

	tunMu   sync.Mutex // guards tunFile
	tunFile *os.File   // open /dev/net/tun handle; nil until ensureTUNReader

	quit chan struct{}  // closed by Stop to signal shutdown
	wg   sync.WaitGroup // tracks heartbeat and TUN-reader goroutines
}
|
|
|
|
// New creates a new client.
|
|
func New(cfg config.ClientConfig) *Client {
|
|
c := &Client{
|
|
cfg: cfg,
|
|
natType: protocol.NATUnknown,
|
|
tunnels: make(map[string]*tunnel.Tunnel),
|
|
sdwanStop: make(chan struct{}),
|
|
quit: make(chan struct{}),
|
|
publicPort: 0,
|
|
localPort: 0,
|
|
}
|
|
|
|
if cfg.RelayEnabled {
|
|
c.relayMgr = relay.NewManager(cfg.RelayPort, true, cfg.SuperRelay, cfg.MaxRelayLoad, cfg.Token)
|
|
}
|
|
|
|
return c
|
|
}
|
|
|
|
// Run is the main client loop. Connects, authenticates, and maintains the connection.
|
|
func (c *Client) Run() error {
|
|
for {
|
|
if err := c.connectAndRun(); err != nil {
|
|
log.Printf("[client] disconnected: %v, reconnecting in 5s...", err)
|
|
}
|
|
|
|
select {
|
|
case <-c.quit:
|
|
return nil
|
|
case <-time.After(5 * time.Second):
|
|
}
|
|
}
|
|
}
|
|
|
|
// connectAndRun performs one full session against the signaling server:
// NAT detection, WebSocket connect, handler registration, login,
// reporting, heartbeat/relay/app startup — then blocks until the read
// loop ends (i.e. the connection drops). Run retries it.
//
// Ordering is deliberate: handlers are registered before ReadLoop
// starts, and ReadLoop starts before the login request is sent.
func (c *Client) connectAndRun() error {
	// 1. NAT Detection
	log.Printf("[client] detecting NAT type via %s...", c.cfg.ServerHost)
	natResult := nat.Detect(
		c.cfg.ServerHost,
		c.cfg.STUNUDP1, c.cfg.STUNUDP2,
		c.cfg.STUNTCP1, c.cfg.STUNTCP2,
	)
	c.natType = natResult.Type
	c.publicIP = natResult.PublicIP
	c.publicPort = natResult.Port1
	c.localPort = natResult.LocalPort
	log.Printf("[client] NAT type=%s, publicIP=%s, publicPort=%d, localPort=%d", c.natType, c.publicIP, c.publicPort, c.localPort)

	// 2. WSS Connect (plain ws only when Insecure is set)
	scheme := "ws"
	if !c.cfg.Insecure {
		scheme = "wss"
	}
	u := url.URL{Scheme: scheme, Host: fmt.Sprintf("%s:%d", c.cfg.ServerHost, c.cfg.ServerPort), Path: "/ws"}

	dialer := websocket.Dialer{
		// Insecure mode also disables TLS certificate verification.
		TLSClientConfig: &tls.Config{InsecureSkipVerify: c.cfg.Insecure},
	}
	ws, _, err := dialer.Dial(u.String(), nil)
	if err != nil {
		return fmt.Errorf("ws connect: %w", err)
	}

	c.conn = signal.NewConn(ws)
	defer c.conn.Close()

	// Register handlers BEFORE ReadLoop so server-pushed messages
	// (e.g. SDWANConfig sent right after LoginRsp) are not dropped.
	c.registerHandlers()

	// Start ReadLoop in background BEFORE sending login
	// (so waiter can receive the LoginRsp)
	readErr := make(chan error, 1)
	go func() {
		readErr <- c.conn.ReadLoop()
	}()

	// 3. Login
	loginReq := protocol.LoginReq{
		Node:           c.cfg.Node,
		Token:          c.cfg.Token,
		User:           c.cfg.User,
		Version:        config.Version,
		NATType:        c.natType,
		ShareBandwidth: c.cfg.ShareBandwidth,
		RelayEnabled:   c.cfg.RelayEnabled,
		SuperRelay:     c.cfg.SuperRelay,
		PublicIP:       c.publicIP,
		PublicPort:     c.publicPort,
	}

	rspData, err := c.conn.Request(
		protocol.MsgLogin, protocol.SubLoginReq, loginReq,
		protocol.MsgLogin, protocol.SubLoginRsp,
		10*time.Second,
	)
	if err != nil {
		return fmt.Errorf("login: %w", err)
	}

	var loginRsp protocol.LoginRsp
	if err := protocol.DecodePayload(rspData, &loginRsp); err != nil {
		return fmt.Errorf("decode login rsp: %w", err)
	}
	if loginRsp.Error != 0 {
		return fmt.Errorf("login rejected: %s", loginRsp.Detail)
	}

	log.Printf("[client] login ok: node=%s, user=%s", loginRsp.Node, loginRsp.User)

	// 4. Send ReportBasic
	c.sendReportBasic()

	// 5. Start heartbeat (stopped via c.quit; tracked by c.wg)
	c.wg.Add(1)
	go c.heartbeatLoop()

	// 7. Start relay if enabled (failure is non-fatal for the session)
	if c.relayMgr != nil {
		if err := c.relayMgr.Start(); err != nil {
			log.Printf("[client] relay start failed: %v", err)
		}
	}

	// 8. Auto-run configured apps
	for _, app := range c.cfg.Apps {
		if app.Enabled {
			go c.connectApp(app)
		}
	}

	// 9. Wait for disconnect
	return <-readErr
}
|
|
|
|
func (c *Client) sendReportBasic() {
|
|
hostname, _ := os.Hostname()
|
|
report := protocol.ReportBasic{
|
|
OS: runtime.GOOS,
|
|
LanIP: getLocalIP(),
|
|
Version: config.Version,
|
|
HasIPv4: 1,
|
|
PublicIP: c.publicIP,
|
|
PublicPort: c.publicPort,
|
|
}
|
|
_ = hostname // for future use
|
|
c.conn.Write(protocol.MsgReport, protocol.SubReportBasic, report)
|
|
}
|
|
|
|
func (c *Client) registerHandlers() {
|
|
// Handle connection coordination from server
|
|
c.conn.OnMessage(protocol.MsgPush, protocol.SubPushConnectReq, func(data []byte) error {
|
|
var req protocol.ConnectReq
|
|
if err := protocol.DecodePayload(data, &req); err != nil {
|
|
return err
|
|
}
|
|
log.Printf("[client] connect request: %s → %s (punch)", req.From, req.To)
|
|
go c.handlePunchRequest(req)
|
|
return nil
|
|
})
|
|
|
|
// Handle peer online notification
|
|
c.conn.OnMessage(protocol.MsgPush, protocol.SubPushNodeOnline, func(data []byte) error {
|
|
var msg struct {
|
|
Node string `json:"node"`
|
|
}
|
|
protocol.DecodePayload(data, &msg)
|
|
log.Printf("[client] peer online: %s, retrying apps", msg.Node)
|
|
// Retry apps targeting this node
|
|
for _, app := range c.cfg.Apps {
|
|
if app.Enabled && app.PeerNode == msg.Node {
|
|
go c.connectApp(app)
|
|
}
|
|
}
|
|
return nil
|
|
})
|
|
|
|
// Handle SDWAN config push
|
|
c.conn.OnMessage(protocol.MsgPush, protocol.SubPushSDWANConfig, func(data []byte) error {
|
|
var cfg protocol.SDWANConfig
|
|
if err := protocol.DecodePayload(data, &cfg); err != nil {
|
|
return err
|
|
}
|
|
if cfg.GatewayCIDR == "" {
|
|
return nil
|
|
}
|
|
log.Printf("[client] sdwan config received: gateway=%s nodes=%d mode=%s", cfg.GatewayCIDR, len(cfg.Nodes), cfg.Mode)
|
|
_ = os.WriteFile("sdwan.json", data[protocol.HeaderSize:], 0644)
|
|
|
|
// apply control+data plane
|
|
if err := c.applySDWAN(cfg); err != nil {
|
|
log.Printf("[client] sdwan apply failed: %v", err)
|
|
}
|
|
return nil
|
|
})
|
|
|
|
// SDWAN peer online/update event
|
|
c.conn.OnMessage(protocol.MsgPush, protocol.SubPushSDWANPeer, func(data []byte) error {
|
|
var p protocol.SDWANPeer
|
|
if err := protocol.DecodePayload(data, &p); err != nil {
|
|
return err
|
|
}
|
|
if p.Node == "" || p.Node == c.cfg.Node || p.IP == "" {
|
|
return nil
|
|
}
|
|
_ = runCmd("ip", "route", "replace", p.IP+"/32", "dev", "optun")
|
|
return nil
|
|
})
|
|
|
|
// SDWAN peer offline/delete event
|
|
c.conn.OnMessage(protocol.MsgPush, protocol.SubPushSDWANDel, func(data []byte) error {
|
|
var p protocol.SDWANPeer
|
|
if err := protocol.DecodePayload(data, &p); err != nil {
|
|
return err
|
|
}
|
|
if p.IP != "" {
|
|
_ = runCmd("ip", "route", "del", p.IP+"/32", "dev", "optun")
|
|
}
|
|
if p.Node != "" {
|
|
c.tMu.Lock()
|
|
if t, ok := c.tunnels[p.Node]; ok {
|
|
t.Close()
|
|
delete(c.tunnels, p.Node)
|
|
}
|
|
c.tMu.Unlock()
|
|
}
|
|
return nil
|
|
})
|
|
|
|
// SDWAN packet from server, inject to local TUN
|
|
c.conn.OnMessage(protocol.MsgTunnel, protocol.SubTunnelSDWANData, func(data []byte) error {
|
|
var pkt protocol.SDWANPacket
|
|
if err := protocol.DecodePayload(data, &pkt); err != nil {
|
|
return err
|
|
}
|
|
if len(pkt.Payload) == 0 {
|
|
return nil
|
|
}
|
|
return c.writeTUN(pkt.Payload)
|
|
})
|
|
|
|
// SDWAN raw packet (binary payload) from server
|
|
c.conn.OnMessage(protocol.MsgTunnel, protocol.SubTunnelSDWANRaw, func(data []byte) error {
|
|
if len(data) <= protocol.HeaderSize {
|
|
return nil
|
|
}
|
|
payload := data[protocol.HeaderSize:]
|
|
if len(payload) == 0 {
|
|
return nil
|
|
}
|
|
return c.writeTUN(payload)
|
|
})
|
|
|
|
// Handle edit app push
|
|
c.conn.OnMessage(protocol.MsgPush, protocol.SubPushEditApp, func(data []byte) error {
|
|
var app protocol.AppConfig
|
|
if err := protocol.DecodePayload(data, &app); err != nil {
|
|
return err
|
|
}
|
|
log.Printf("[client] edit app push: %s → %s:%d", app.AppName, app.PeerNode, app.DstPort)
|
|
go c.connectApp(config.AppConfig{
|
|
AppName: app.AppName,
|
|
Protocol: app.Protocol,
|
|
SrcPort: app.SrcPort,
|
|
PeerNode: app.PeerNode,
|
|
DstHost: app.DstHost,
|
|
DstPort: app.DstPort,
|
|
Enabled: true,
|
|
})
|
|
return nil
|
|
})
|
|
|
|
// Handle relay connect request (when this node acts as relay)
|
|
if c.relayMgr != nil {
|
|
c.conn.OnMessage(protocol.MsgPush, protocol.SubPushRelayOffer, func(data []byte) error {
|
|
var req struct {
|
|
From string `json:"from"`
|
|
To string `json:"to"`
|
|
Token uint64 `json:"token"`
|
|
}
|
|
if err := protocol.DecodePayload(data, &req); err != nil {
|
|
return err
|
|
}
|
|
|
|
// Verify TOTP
|
|
if !auth.VerifyTOTP(req.Token, c.cfg.Token, time.Now().Unix()) {
|
|
log.Printf("[client] relay request from %s denied: TOTP mismatch", req.From)
|
|
return nil
|
|
}
|
|
|
|
log.Printf("[client] accepting relay: %s → %s", req.From, req.To)
|
|
return nil
|
|
})
|
|
}
|
|
}
|
|
|
|
func (c *Client) heartbeatLoop() {
|
|
defer c.wg.Done()
|
|
ticker := time.NewTicker(time.Duration(config.HeartbeatInterval) * time.Second)
|
|
defer ticker.Stop()
|
|
|
|
for {
|
|
select {
|
|
case <-ticker.C:
|
|
if err := c.conn.Write(protocol.MsgHeartbeat, protocol.SubHeartbeatPing, nil); err != nil {
|
|
log.Printf("[client] heartbeat send failed: %v", err)
|
|
return
|
|
}
|
|
case <-c.quit:
|
|
return
|
|
}
|
|
}
|
|
}
|
|
|
|
// connectApp establishes a tunnel for an app config.
|
|
func (c *Client) connectApp(app config.AppConfig) {
|
|
log.Printf("[client] connecting app %s: :%d → %s:%d", app.AppName, app.SrcPort, app.PeerNode, app.DstPort)
|
|
|
|
// Check if we already have a tunnel
|
|
c.tMu.RLock()
|
|
if t, ok := c.tunnels[app.PeerNode]; ok && t.IsAlive() {
|
|
c.tMu.RUnlock()
|
|
// Tunnel exists, just add the port forward
|
|
if err := t.ListenAndForward(app.Protocol, app.SrcPort, app.DstHost, app.DstPort); err != nil {
|
|
log.Printf("[client] listen error for %s: %v", app.AppName, err)
|
|
}
|
|
return
|
|
}
|
|
c.tMu.RUnlock()
|
|
|
|
// Request connection coordination from server
|
|
req := protocol.ConnectReq{
|
|
From: c.cfg.Node,
|
|
To: app.PeerNode,
|
|
Protocol: app.Protocol,
|
|
SrcPort: app.SrcPort,
|
|
DstHost: app.DstHost,
|
|
DstPort: app.DstPort,
|
|
}
|
|
|
|
rspData, err := c.conn.Request(
|
|
protocol.MsgPush, protocol.SubPushConnectReq, req,
|
|
protocol.MsgPush, protocol.SubPushConnectRsp,
|
|
15*time.Second,
|
|
)
|
|
if err != nil {
|
|
log.Printf("[client] connect coordination failed for %s: %v", app.PeerNode, err)
|
|
c.tryRelay(app)
|
|
return
|
|
}
|
|
|
|
var rsp protocol.ConnectRsp
|
|
protocol.DecodePayload(rspData, &rsp)
|
|
if rsp.Error != 0 {
|
|
log.Printf("[client] connect denied: %s", rsp.Detail)
|
|
c.tryRelay(app)
|
|
return
|
|
}
|
|
|
|
// Attempt punch
|
|
result := punch.Connect(punch.Config{
|
|
PeerIP: rsp.Peer.IP,
|
|
PeerPort: rsp.Peer.Port,
|
|
PeerNAT: rsp.Peer.NATType,
|
|
SelfNAT: c.natType,
|
|
SelfPort: c.localPort,
|
|
IsInitiator: true,
|
|
})
|
|
|
|
if result.Error != nil {
|
|
log.Printf("[client] punch failed for %s: %v", app.PeerNode, result.Error)
|
|
c.tryRelay(app)
|
|
c.reportConnect(app, protocol.ReportConnect{
|
|
PeerNode: app.PeerNode, Error: result.Error.Error(),
|
|
NATType: c.natType, PeerNATType: rsp.Peer.NATType,
|
|
})
|
|
return
|
|
}
|
|
|
|
// Punch success — create tunnel
|
|
t := tunnel.New(app.PeerNode, result.Conn, result.Mode, result.RTT, true)
|
|
c.tMu.Lock()
|
|
c.tunnels[app.PeerNode] = t
|
|
c.tMu.Unlock()
|
|
|
|
if err := t.ListenAndForward(app.Protocol, app.SrcPort, app.DstHost, app.DstPort); err != nil {
|
|
log.Printf("[client] listen error: %v", err)
|
|
}
|
|
|
|
c.reportConnect(app, protocol.ReportConnect{
|
|
PeerNode: app.PeerNode, LinkMode: result.Mode,
|
|
RTT: int(result.RTT.Milliseconds()),
|
|
NATType: c.natType, PeerNATType: rsp.Peer.NATType,
|
|
})
|
|
|
|
log.Printf("[client] tunnel established: %s via %s (rtt=%s)", app.PeerNode, result.Mode, result.RTT)
|
|
}
|
|
|
|
// tryRelay attempts to use a relay node.
|
|
func (c *Client) tryRelay(app config.AppConfig) {
|
|
log.Printf("[client] trying relay for %s", app.PeerNode)
|
|
|
|
rspData, err := c.conn.Request(
|
|
protocol.MsgRelay, protocol.SubRelayNodeReq,
|
|
protocol.RelayNodeReq{PeerNode: app.PeerNode},
|
|
protocol.MsgRelay, protocol.SubRelayNodeRsp,
|
|
10*time.Second,
|
|
)
|
|
if err != nil {
|
|
log.Printf("[client] relay request failed: %v", err)
|
|
return
|
|
}
|
|
|
|
var rsp protocol.RelayNodeRsp
|
|
protocol.DecodePayload(rspData, &rsp)
|
|
if rsp.Error != 0 {
|
|
log.Printf("[client] no relay available for %s", app.PeerNode)
|
|
return
|
|
}
|
|
|
|
log.Printf("[client] relay via %s (%s mode), connecting...", rsp.RelayName, rsp.Mode)
|
|
|
|
// Connect to relay node
|
|
result := punch.AttemptDirect(punch.Config{
|
|
PeerIP: rsp.RelayIP,
|
|
PeerPort: rsp.RelayPort,
|
|
})
|
|
if result.Error != nil {
|
|
log.Printf("[client] relay connect failed: %v", result.Error)
|
|
return
|
|
}
|
|
|
|
t := tunnel.New(app.PeerNode, result.Conn, "relay-"+rsp.Mode, result.RTT, true)
|
|
c.tMu.Lock()
|
|
c.tunnels[app.PeerNode] = t
|
|
c.tMu.Unlock()
|
|
|
|
if err := t.ListenAndForward(app.Protocol, app.SrcPort, app.DstHost, app.DstPort); err != nil {
|
|
log.Printf("[client] relay listen error: %v", err)
|
|
}
|
|
|
|
c.reportConnect(app, protocol.ReportConnect{
|
|
PeerNode: app.PeerNode, LinkMode: "relay", RelayNode: rsp.RelayName,
|
|
})
|
|
|
|
log.Printf("[client] relay tunnel established: %s via %s", app.PeerNode, rsp.RelayName)
|
|
}
|
|
|
|
func (c *Client) handlePunchRequest(req protocol.ConnectReq) {
|
|
log.Printf("[client] handling punch from %s, NAT=%s", req.From, req.Peer.NATType)
|
|
|
|
result := punch.Connect(punch.Config{
|
|
PeerIP: req.Peer.IP,
|
|
PeerPort: req.Peer.Port,
|
|
PeerNAT: req.Peer.NATType,
|
|
SelfNAT: c.natType,
|
|
SelfPort: c.localPort,
|
|
IsInitiator: false,
|
|
})
|
|
|
|
rsp := protocol.ConnectRsp{
|
|
From: c.cfg.Node,
|
|
To: req.From,
|
|
}
|
|
|
|
if result.Error != nil {
|
|
rsp.Error = 1
|
|
rsp.Detail = result.Error.Error()
|
|
log.Printf("[client] punch from %s failed: %v", req.From, result.Error)
|
|
} else {
|
|
rsp.Peer = protocol.PunchParams{
|
|
IP: c.publicIP,
|
|
NATType: c.natType,
|
|
}
|
|
log.Printf("[client] punch from %s OK via %s", req.From, result.Mode)
|
|
|
|
// Create tunnel for the incoming connection
|
|
t := tunnel.New(req.From, result.Conn, result.Mode, result.RTT, false)
|
|
c.tMu.Lock()
|
|
c.tunnels[req.From] = t
|
|
c.tMu.Unlock()
|
|
}
|
|
|
|
c.conn.Write(protocol.MsgPush, protocol.SubPushConnectRsp, rsp)
|
|
}
|
|
|
|
func (c *Client) reportConnect(app config.AppConfig, rc protocol.ReportConnect) {
|
|
rc.Protocol = app.Protocol
|
|
rc.SrcPort = app.SrcPort
|
|
rc.DstPort = app.DstPort
|
|
rc.DstHost = app.DstHost
|
|
rc.Version = config.Version
|
|
rc.ShareBandwidth = c.cfg.ShareBandwidth
|
|
c.conn.Write(protocol.MsgReport, protocol.SubReportConnect, rc)
|
|
}
|
|
|
|
func (c *Client) applySDWAN(cfg protocol.SDWANConfig) error {
|
|
selfIP := ""
|
|
for _, n := range cfg.Nodes {
|
|
if n.Node == c.cfg.Node {
|
|
selfIP = strings.TrimSpace(n.IP)
|
|
break
|
|
}
|
|
}
|
|
if selfIP == "" {
|
|
return fmt.Errorf("node %s not found in sdwan nodes", c.cfg.Node)
|
|
}
|
|
// Use ioctl method only - it creates the device if not exists
|
|
// Skip ip tuntap add to avoid conflicts
|
|
_ = runCmd("ip", "tuntap", "add", "dev", "optun", "mode", "tun")
|
|
_ = runCmd("ip", "link", "set", "dev", "optun", "up")
|
|
_ = runCmd("ip", "link", "set", "dev", "optun", "mtu", "1420")
|
|
_ = runCmd("ip", "addr", "add", selfIP+"/32", "dev", "optun")
|
|
|
|
pfx, err := netip.ParsePrefix(cfg.GatewayCIDR)
|
|
if err != nil {
|
|
return fmt.Errorf("invalid gateway cidr: %s", cfg.GatewayCIDR)
|
|
}
|
|
// prefer /32 host routes for full-mesh precision
|
|
for _, n := range cfg.Nodes {
|
|
ip := strings.TrimSpace(n.IP)
|
|
if ip == "" || ip == selfIP {
|
|
log.Printf("[client] tun read error: %v", err)
|
|
}
|
|
_ = runCmd("ip", "route", "replace", ip+"/32", "dev", "optun")
|
|
}
|
|
// fallback broad route for hub mode / compatibility
|
|
_ = runCmd("ip", "route", "replace", pfx.String(), "dev", "optun")
|
|
|
|
c.sdwanMu.Lock()
|
|
c.sdwan = cfg
|
|
c.sdwanIP = selfIP
|
|
c.sdwanMu.Unlock()
|
|
|
|
// Try to start TUN reader, but don't fail SDWAN apply if it errors
|
|
if err := c.ensureTUNReader(); err != nil {
|
|
log.Printf("[client] ensureTUNReader failed (non-fatal): %v", err)
|
|
}
|
|
log.Printf("[client] sdwan applied: optun=%s route=%s dev optun", selfIP, pfx.String())
|
|
return nil
|
|
}
|
|
|
|
func (c *Client) ensureTUNReader() error {
|
|
c.tunMu.Lock()
|
|
defer c.tunMu.Unlock()
|
|
if c.tunFile != nil {
|
|
return nil
|
|
}
|
|
// Try to open existing TUN device without deleting it
|
|
f, err := os.OpenFile("/dev/net/tun", os.O_RDWR, 0)
|
|
if err != nil {
|
|
log.Printf("[client] open /dev/net/tun: %v", err)
|
|
return err
|
|
}
|
|
ifr, err := unix.NewIfreq("optun")
|
|
if err != nil {
|
|
f.Close()
|
|
log.Printf("[client] new ifreq: %v", err)
|
|
return err
|
|
}
|
|
ifr.SetUint16(unix.IFF_TUN | unix.IFF_NO_PI)
|
|
if err := unix.IoctlIfreq(int(f.Fd()), unix.TUNSETIFF, ifr); err != nil {
|
|
// Device might already exist and be bound to another process
|
|
// Try to use it anyway - maybe we can read from it
|
|
log.Printf("[client] TUNSETIFF: %v (continuing anyway)", err)
|
|
}
|
|
c.tunFile = f
|
|
c.wg.Add(1)
|
|
go c.tunReadLoop()
|
|
log.Printf("[client] tun reader started")
|
|
return nil
|
|
}
|
|
|
|
func (c *Client) tunReadLoop() {
|
|
defer c.wg.Done()
|
|
buf := make([]byte, 65535)
|
|
for {
|
|
select {
|
|
case <-c.quit:
|
|
return
|
|
default:
|
|
}
|
|
c.tunMu.Lock()
|
|
f := c.tunFile
|
|
c.tunMu.Unlock()
|
|
if f == nil {
|
|
return
|
|
}
|
|
n, err := f.Read(buf)
|
|
if err != nil {
|
|
if c.IsStopping() {
|
|
return
|
|
}
|
|
time.Sleep(100 * time.Millisecond)
|
|
log.Printf("[client] tun read error: %v", err)
|
|
}
|
|
if n == 0 || n < 20 {
|
|
log.Printf("[client] tun read error: %v", err)
|
|
}
|
|
pkt := buf[:n]
|
|
version := pkt[0] >> 4
|
|
if version != 4 {
|
|
log.Printf("[client] tun read error: %v", err)
|
|
}
|
|
dstIP := net.IP(pkt[16:20]).String()
|
|
c.sdwanMu.RLock()
|
|
self := c.sdwanIP
|
|
c.sdwanMu.RUnlock()
|
|
if dstIP == self {
|
|
log.Printf("[client] tun read error: %v", err)
|
|
}
|
|
// send raw binary to avoid JSON base64 overhead
|
|
log.Printf("[client] tun: read pkt len=%d dst=%s", n, dstIP)
|
|
frame := protocol.EncodeRaw(protocol.MsgTunnel, protocol.SubTunnelSDWANRaw, pkt)
|
|
_ = c.conn.WriteRaw(frame)
|
|
}
|
|
}
|
|
|
|
func (c *Client) writeTUN(payload []byte) error {
|
|
c.tunMu.Lock()
|
|
f := c.tunFile
|
|
c.tunMu.Unlock()
|
|
if f == nil {
|
|
return nil
|
|
}
|
|
_, err := f.Write(payload)
|
|
return err
|
|
}
|
|
|
|
func (c *Client) IsStopping() bool {
|
|
select {
|
|
case <-c.quit:
|
|
return true
|
|
default:
|
|
return false
|
|
}
|
|
}
|
|
|
|
// runCmd executes an external command and returns nil on success. On
// failure the returned error wraps the exec error and includes the
// command line plus its trimmed combined output.
func runCmd(name string, args ...string) error {
	out, err := exec.Command(name, args...).CombinedOutput()
	if err == nil {
		return nil
	}
	return fmt.Errorf("%s %v: %w: %s", name, args, err, strings.TrimSpace(string(out)))
}
|
|
|
|
// Stop shuts down the client.
|
|
func (c *Client) Stop() {
|
|
close(c.quit)
|
|
c.tunMu.Lock()
|
|
if c.tunFile != nil {
|
|
_ = c.tunFile.Close()
|
|
c.tunFile = nil
|
|
}
|
|
c.tunMu.Unlock()
|
|
if c.conn != nil {
|
|
c.conn.Close()
|
|
}
|
|
if c.relayMgr != nil {
|
|
c.relayMgr.Stop()
|
|
}
|
|
c.tMu.Lock()
|
|
for _, t := range c.tunnels {
|
|
t.Close()
|
|
}
|
|
c.tMu.Unlock()
|
|
c.wg.Wait()
|
|
}
|
|
|
|
// ─── helpers ───
|
|
|
|
func getLocalIP() string {
|
|
// Simple heuristic: find the first non-loopback IPv4
|
|
addrs, _ := os.Hostname()
|
|
_ = addrs
|
|
return "0.0.0.0" // placeholder, will be properly implemented
|
|
}
|