Full Syncthing tray wrapper with:

- System tray with 5 icon states (idle/syncing/paused/error/disconnected)
- Syncthing REST API client with auto-discovered API key
- Long-polling event listener for real-time status
- Transfer rate monitoring, folder tracking, recent files, conflict counting
- Full context menu with folders, recent files, settings toggles
- Embedded admin panel binary (webview, requires CGO)
- OS notifications via beeep (per-event configurable)
- Syncthing process management with auto-restart
- Cross-platform installer with autostart
- CI pipeline for Linux (.deb + .tar.gz) and Windows (.zip)

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
35
internal/monitor/conflicts.go
Normal file
35
internal/monitor/conflicts.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package monitor
|
||||
|
||||
import "sync"
|
||||
|
||||
// ConflictTracker counts sync conflicts across all folders.
// The zero value is NOT ready for use; create one with NewConflictTracker.
type ConflictTracker struct {
	mu    sync.Mutex // guards count
	count int
}

// NewConflictTracker creates a new conflict tracker with a zero count.
func NewConflictTracker() *ConflictTracker {
	return &ConflictTracker{}
}

// SetCount updates the total conflict count to n.
func (t *ConflictTracker) SetCount(n int) {
	t.mu.Lock()
	t.count = n
	t.mu.Unlock()
}

// Increment adds one to the conflict count.
func (t *ConflictTracker) Increment() {
	t.mu.Lock()
	t.count++
	t.mu.Unlock()
}

// Count returns the current conflict count.
func (t *ConflictTracker) Count() int {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.count
}
|
||||
60
internal/monitor/folders.go
Normal file
60
internal/monitor/folders.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
st "git.davoryn.de/calic/syncwarden/internal/syncthing"
|
||||
)
|
||||
|
||||
// FolderTracker tracks per-folder status.
|
||||
type FolderTracker struct {
|
||||
mu sync.Mutex
|
||||
folders []FolderInfo
|
||||
}
|
||||
|
||||
// NewFolderTracker creates a new folder tracker.
|
||||
func NewFolderTracker() *FolderTracker {
|
||||
return &FolderTracker{}
|
||||
}
|
||||
|
||||
// UpdateFromConfig sets the folder list from Syncthing config.
|
||||
func (ft *FolderTracker) UpdateFromConfig(folders []st.FolderConfig) {
|
||||
ft.mu.Lock()
|
||||
defer ft.mu.Unlock()
|
||||
|
||||
ft.folders = make([]FolderInfo, len(folders))
|
||||
for i, f := range folders {
|
||||
ft.folders[i] = FolderInfo{
|
||||
ID: f.ID,
|
||||
Label: f.Label,
|
||||
Path: f.Path,
|
||||
State: "unknown",
|
||||
}
|
||||
if f.Label == "" {
|
||||
ft.folders[i].Label = f.ID
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateStatus updates the state for a specific folder.
|
||||
func (ft *FolderTracker) UpdateStatus(folderID, state string) {
|
||||
ft.mu.Lock()
|
||||
defer ft.mu.Unlock()
|
||||
|
||||
for i := range ft.folders {
|
||||
if ft.folders[i].ID == folderID {
|
||||
ft.folders[i].State = state
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Folders returns a snapshot of all folder info.
|
||||
func (ft *FolderTracker) Folders() []FolderInfo {
|
||||
ft.mu.Lock()
|
||||
defer ft.mu.Unlock()
|
||||
|
||||
out := make([]FolderInfo, len(ft.folders))
|
||||
copy(out, ft.folders)
|
||||
return out
|
||||
}
|
||||
384
internal/monitor/monitor.go
Normal file
384
internal/monitor/monitor.go
Normal file
@@ -0,0 +1,384 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"git.davoryn.de/calic/syncwarden/internal/config"
|
||||
"git.davoryn.de/calic/syncwarden/internal/icons"
|
||||
st "git.davoryn.de/calic/syncwarden/internal/syncthing"
|
||||
)
|
||||
|
||||
// StatusCallback is called whenever the aggregate status changes.
type StatusCallback func(AggregateStatus)

// EventCallback is called for notable events (for notifications).
// eventType is a short identifier such as "SyncComplete", "Conflict",
// "DeviceConnected", "DeviceDisconnected" or "NewDevice"; data carries
// event-specific string fields (e.g. "folder", "file", "name").
type EventCallback func(eventType string, data map[string]string)

// Monitor coordinates all tracking and polling. It owns the Syncthing
// event listener, a periodic connection/health poller, and the
// per-concern trackers (speed, folders, recent files, conflicts).
type Monitor struct {
	mu       sync.Mutex // guards connected, paused, lastSync, the device counts and cfg
	client   *st.Client
	cfg      config.Config
	callback StatusCallback
	eventCb  EventCallback

	// Per-concern trackers; each is internally synchronized.
	speed     *SpeedTracker
	folders   *FolderTracker
	recent    *RecentTracker
	conflicts *ConflictTracker

	events *st.EventListener // set in Start; nil until then
	stopCh chan struct{}     // closed by Stop to terminate pollLoop
	wg     sync.WaitGroup    // tracks pollLoop

	// The following fields are guarded by mu.
	connected bool      // Syncthing API currently reachable
	paused    bool      // true when every configured folder is paused
	lastSync  time.Time // time of the last successfully synced item

	devicesTotal  int // devices in the Syncthing config
	devicesOnline int // devices currently connected
	pendingDevs   int // devices awaiting approval
}
|
||||
|
||||
// New creates a new Monitor.
|
||||
func New(client *st.Client, cfg config.Config, callback StatusCallback, eventCb EventCallback) *Monitor {
|
||||
return &Monitor{
|
||||
client: client,
|
||||
cfg: cfg,
|
||||
callback: callback,
|
||||
eventCb: eventCb,
|
||||
speed: NewSpeedTracker(),
|
||||
folders: NewFolderTracker(),
|
||||
recent: NewRecentTracker(),
|
||||
conflicts: NewConflictTracker(),
|
||||
stopCh: make(chan struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
// Start begins all monitoring goroutines: the Syncthing event listener
// (resuming from the last persisted event ID), the periodic
// connection/health poller, and an initial full state refresh.
func (m *Monitor) Start() {
	// Start event listener; onEvents is invoked with batches of events.
	m.events = st.NewEventListener(m.client, m.cfg.LastEventID, m.onEvents)
	m.events.Start()

	// Start periodic poller; tracked by wg so Stop can wait for it.
	m.wg.Add(1)
	go m.pollLoop()

	// Initial full refresh, async so Start returns quickly.
	// NOTE(review): this goroutine is not tracked by wg, so it may
	// still be running when Stop returns.
	go m.fullRefresh()
}
|
||||
|
||||
// Stop halts all monitoring.
|
||||
func (m *Monitor) Stop() {
|
||||
close(m.stopCh)
|
||||
if m.events != nil {
|
||||
m.events.Stop()
|
||||
}
|
||||
m.wg.Wait()
|
||||
|
||||
// Persist last event ID
|
||||
m.mu.Lock()
|
||||
m.cfg.LastEventID = m.events.LastEventID()
|
||||
m.mu.Unlock()
|
||||
_ = config.Save(m.cfg)
|
||||
}
|
||||
|
||||
func (m *Monitor) pollLoop() {
|
||||
defer m.wg.Done()
|
||||
|
||||
ticker := time.NewTicker(3 * time.Second)
|
||||
defer ticker.Stop()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-m.stopCh:
|
||||
return
|
||||
case <-ticker.C:
|
||||
m.pollConnections()
|
||||
m.pollHealth()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Monitor) pollHealth() {
|
||||
_, err := m.client.Health()
|
||||
m.mu.Lock()
|
||||
wasConnected := m.connected
|
||||
m.connected = err == nil
|
||||
m.mu.Unlock()
|
||||
|
||||
if !wasConnected && err == nil {
|
||||
// Reconnected — do a full refresh
|
||||
go m.fullRefresh()
|
||||
}
|
||||
if wasConnected && err != nil {
|
||||
m.emitStatus()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Monitor) pollConnections() {
|
||||
conns, err := m.client.SystemConnections()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
m.speed.Update(conns.Total.InBytesTotal, conns.Total.OutBytesTotal)
|
||||
|
||||
online := 0
|
||||
for _, c := range conns.Connections {
|
||||
if c.Connected {
|
||||
online++
|
||||
}
|
||||
}
|
||||
|
||||
m.mu.Lock()
|
||||
m.devicesOnline = online
|
||||
m.mu.Unlock()
|
||||
|
||||
m.emitStatus()
|
||||
}
|
||||
|
||||
// fullRefresh re-fetches the complete Syncthing state: the config
// (folders and devices), each folder's current status, and the
// pending-device list, then emits an aggregate status. Called on
// startup and after a reconnect.
func (m *Monitor) fullRefresh() {
	// Get config (folders + devices)
	cfg, err := m.client.Config()
	if err != nil {
		log.Printf("config fetch error: %v", err)
		return
	}

	m.folders.UpdateFromConfig(cfg.Folders)

	m.mu.Lock()
	m.devicesTotal = len(cfg.Devices)
	// A successful config fetch implies the API is reachable.
	m.connected = true
	m.mu.Unlock()

	// Query each folder's status; "paused" only if every folder is paused.
	allPaused := true
	for _, f := range cfg.Folders {
		if !f.Paused {
			allPaused = false
		}
		status, err := m.client.FolderStatus(f.ID)
		if err != nil {
			// Best effort: skip folders whose status is unavailable.
			continue
		}
		m.folders.UpdateStatus(f.ID, status.State)
	}

	m.mu.Lock()
	// An empty folder list does not count as paused.
	m.paused = allPaused && len(cfg.Folders) > 0
	m.mu.Unlock()

	// Check pending devices (best effort; on error keep the old count).
	pending, err := m.client.PendingDevices()
	if err == nil {
		m.mu.Lock()
		m.pendingDevs = len(pending)
		m.mu.Unlock()
	}

	m.emitStatus()
}
|
||||
|
||||
// onEvents dispatches a batch of Syncthing events to the appropriate
// handlers and emits one aggregate status update after the batch.
func (m *Monitor) onEvents(events []st.Event) {
	for _, ev := range events {
		switch ev.Type {
		case "StateChanged":
			m.handleStateChanged(ev)
		case "ItemFinished":
			m.handleItemFinished(ev)
		case "DeviceConnected":
			m.handleDeviceEvent(ev, true)
		case "DeviceDisconnected":
			m.handleDeviceEvent(ev, false)
		case "PendingDevicesChanged":
			// Async: needs an extra REST round-trip.
			go m.refreshPendingDevices()
		case "FolderCompletion", "FolderSummary":
			// Trigger a folder status refresh (async REST round-trips).
			go m.refreshFolderStatuses()
		}
	}
	m.emitStatus()
}
|
||||
|
||||
func (m *Monitor) handleStateChanged(ev st.Event) {
|
||||
data, ok := ev.Data.(map[string]any)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
folder, _ := data["folder"].(string)
|
||||
from, _ := data["from"].(string)
|
||||
to, _ := data["to"].(string)
|
||||
if folder != "" && to != "" {
|
||||
m.folders.UpdateStatus(folder, to)
|
||||
// Notify when folder finishes syncing
|
||||
if from == "syncing" && to == "idle" {
|
||||
m.emitEvent("SyncComplete", map[string]string{
|
||||
"folder": folderLabel(m.folders.Folders(), folder),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleItemFinished processes an ItemFinished event: conflict errors
// bump the conflict counter and emit a "Conflict" notification; other
// errors are ignored; successful non-delete items are recorded in the
// recent-files list and refresh lastSync.
func (m *Monitor) handleItemFinished(ev st.Event) {
	data, ok := ev.Data.(map[string]any)
	if !ok {
		return
	}
	item, _ := data["item"].(string)
	folder, _ := data["folder"].(string)
	errStr, _ := data["error"].(string)
	action, _ := data["action"].(string)

	if errStr != "" {
		if isConflict(errStr) {
			m.conflicts.Increment()
			m.emitEvent("Conflict", map[string]string{
				"file":   filepath.Base(item),
				"folder": folderLabel(m.folders.Folders(), folder),
			})
		}
		// Non-conflict errors are dropped silently.
		return
	}

	// Deletions are not shown in the recent-files list.
	if item != "" && folder != "" && action != "delete" {
		m.recent.Add(filepath.Base(item), folderLabel(m.folders.Folders(), folder))
		m.mu.Lock()
		m.lastSync = time.Now()
		m.mu.Unlock()
	}
}
|
||||
|
||||
// handleDeviceEvent reacts to a device connecting or disconnecting:
// it re-counts online devices in the background and emits a
// DeviceConnected/DeviceDisconnected notification event.
//
// NOTE(review): the recount goroutine is not tracked by wg and does not
// observe stopCh, so it may briefly outlive Stop.
func (m *Monitor) handleDeviceEvent(ev st.Event, connected bool) {
	// Re-count online devices
	go func() {
		conns, err := m.client.SystemConnections()
		if err != nil {
			return
		}
		online := 0
		for _, c := range conns.Connections {
			if c.Connected {
				online++
			}
		}
		m.mu.Lock()
		m.devicesOnline = online
		m.mu.Unlock()
		m.emitStatus()
	}()

	data, ok := ev.Data.(map[string]any)
	if !ok {
		return
	}
	// Prefer the human-readable device name; fall back to the raw ID.
	deviceName, _ := data["name"].(string)
	if deviceName == "" {
		deviceName, _ = data["id"].(string)
	}
	if connected {
		m.emitEvent("DeviceConnected", map[string]string{"name": deviceName})
	} else {
		m.emitEvent("DeviceDisconnected", map[string]string{"name": deviceName})
	}
}
|
||||
|
||||
func (m *Monitor) refreshPendingDevices() {
|
||||
pending, err := m.client.PendingDevices()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
m.mu.Lock()
|
||||
oldCount := m.pendingDevs
|
||||
m.pendingDevs = len(pending)
|
||||
m.mu.Unlock()
|
||||
|
||||
if len(pending) > oldCount {
|
||||
for _, dev := range pending {
|
||||
name := dev.Name
|
||||
if name == "" {
|
||||
name = dev.DeviceID[:8]
|
||||
}
|
||||
m.emitEvent("NewDevice", map[string]string{"name": name})
|
||||
}
|
||||
}
|
||||
m.emitStatus()
|
||||
}
|
||||
|
||||
func (m *Monitor) emitEvent(eventType string, data map[string]string) {
|
||||
if m.eventCb != nil {
|
||||
m.eventCb(eventType, data)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Monitor) refreshFolderStatuses() {
|
||||
for _, f := range m.folders.Folders() {
|
||||
status, err := m.client.FolderStatus(f.ID)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
m.folders.UpdateStatus(f.ID, status.State)
|
||||
}
|
||||
m.emitStatus()
|
||||
}
|
||||
|
||||
// emitStatus assembles a snapshot of all tracked state into an
// AggregateStatus and pushes it to the status callback.
func (m *Monitor) emitStatus() {
	// Gather tracker snapshots before taking the monitor lock; each
	// tracker has its own internal mutex.
	down, up := m.speed.Rates()
	folders := m.folders.Folders()

	m.mu.Lock()
	status := AggregateStatus{
		DevicesTotal:   m.devicesTotal,
		DevicesOnline:  m.devicesOnline,
		DownRate:       down,
		UpRate:         up,
		LastSync:       m.lastSync,
		Paused:         m.paused,
		RecentFiles:    m.recent.Files(),
		ConflictCount:  m.conflicts.Count(),
		Folders:        folders,
		PendingDevices: m.pendingDevs,
	}

	// Disconnected overrides everything; otherwise derive the tray icon
	// state from the folder states and the paused flag.
	if !m.connected {
		status.State = icons.StateDisconnected
	} else {
		status.State = stateFromFolders(folders, m.paused)
	}
	m.mu.Unlock()

	m.callback(status)
}
|
||||
|
||||
// EventData returns the event data field as a typed map.
|
||||
func EventData(ev st.Event) map[string]any {
|
||||
if data, ok := ev.Data.(map[string]any); ok {
|
||||
return data
|
||||
}
|
||||
// Try JSON re-marshal for nested types
|
||||
b, err := json.Marshal(ev.Data)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
var data map[string]any
|
||||
if json.Unmarshal(b, &data) == nil {
|
||||
return data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// isConflict reports whether an ItemFinished error string denotes a
// sync conflict rather than an ordinary failure.
func isConflict(errStr string) bool {
	switch errStr {
	case "conflict", "conflicting changes":
		return true
	default:
		return false
	}
}
|
||||
|
||||
func folderLabel(folders []FolderInfo, id string) string {
|
||||
for _, f := range folders {
|
||||
if f.ID == id {
|
||||
return f.Label
|
||||
}
|
||||
}
|
||||
return id
|
||||
}
|
||||
49
internal/monitor/recent.go
Normal file
49
internal/monitor/recent.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
const maxRecentFiles = 10
|
||||
|
||||
// RecentTracker maintains a ring buffer of recently synced files.
|
||||
type RecentTracker struct {
|
||||
mu sync.Mutex
|
||||
files []RecentFile
|
||||
}
|
||||
|
||||
// NewRecentTracker creates a new recent files tracker.
|
||||
func NewRecentTracker() *RecentTracker {
|
||||
return &RecentTracker{
|
||||
files: make([]RecentFile, 0, maxRecentFiles),
|
||||
}
|
||||
}
|
||||
|
||||
// Add records a newly synced file.
|
||||
func (rt *RecentTracker) Add(name, folder string) {
|
||||
rt.mu.Lock()
|
||||
defer rt.mu.Unlock()
|
||||
|
||||
rf := RecentFile{
|
||||
Name: name,
|
||||
Folder: folder,
|
||||
Timestamp: time.Now(),
|
||||
}
|
||||
|
||||
// Prepend (most recent first)
|
||||
rt.files = append([]RecentFile{rf}, rt.files...)
|
||||
if len(rt.files) > maxRecentFiles {
|
||||
rt.files = rt.files[:maxRecentFiles]
|
||||
}
|
||||
}
|
||||
|
||||
// Files returns a snapshot of recent files (most recent first).
|
||||
func (rt *RecentTracker) Files() []RecentFile {
|
||||
rt.mu.Lock()
|
||||
defer rt.mu.Unlock()
|
||||
|
||||
out := make([]RecentFile, len(rt.files))
|
||||
copy(out, rt.files)
|
||||
return out
|
||||
}
|
||||
52
internal/monitor/speed.go
Normal file
52
internal/monitor/speed.go
Normal file
@@ -0,0 +1,52 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SpeedTracker calculates transfer rates by diffing cumulative byte
// counters across successive Update calls. Safe for concurrent use.
type SpeedTracker struct {
	mu       sync.Mutex // guards all fields below
	lastIn   int64      // previous cumulative inbound byte counter
	lastOut  int64      // previous cumulative outbound byte counter
	lastTime time.Time  // when the previous sample was taken
	downRate float64    // bytes/sec inbound
	upRate   float64    // bytes/sec outbound
}

// NewSpeedTracker creates a new speed tracker with zero rates.
func NewSpeedTracker() *SpeedTracker {
	return &SpeedTracker{}
}

// Update records new cumulative byte counters and recomputes the rates.
// The first call only establishes a baseline; negative deltas (e.g. a
// counter reset) clamp the corresponding rate to zero.
func (sp *SpeedTracker) Update(inBytes, outBytes int64) {
	sp.mu.Lock()
	defer sp.mu.Unlock()

	now := time.Now()
	// The new sample always becomes the baseline for the next call.
	defer func() {
		sp.lastIn, sp.lastOut, sp.lastTime = inBytes, outBytes, now
	}()

	if sp.lastTime.IsZero() {
		return // no previous sample: nothing to diff against yet
	}
	elapsed := now.Sub(sp.lastTime).Seconds()
	if elapsed <= 0 {
		return // zero interval: keep the previous rates
	}

	down := float64(inBytes-sp.lastIn) / elapsed
	up := float64(outBytes-sp.lastOut) / elapsed
	if down < 0 {
		down = 0
	}
	if up < 0 {
		up = 0
	}
	sp.downRate, sp.upRate = down, up
}

// Rates returns the current download and upload rates in bytes/sec.
func (sp *SpeedTracker) Rates() (down, up float64) {
	sp.mu.Lock()
	defer sp.mu.Unlock()
	return sp.downRate, sp.upRate
}
|
||||
64
internal/monitor/state.go
Normal file
64
internal/monitor/state.go
Normal file
@@ -0,0 +1,64 @@
|
||||
package monitor
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"git.davoryn.de/calic/syncwarden/internal/icons"
|
||||
)
|
||||
|
||||
// AggregateStatus holds the full aggregated status for the tray,
// assembled by Monitor.emitStatus from all trackers.
type AggregateStatus struct {
	State          icons.State  // aggregate tray icon state
	DevicesTotal   int          // devices in the Syncthing config
	DevicesOnline  int          // devices currently connected
	DownRate       float64      // bytes/sec
	UpRate         float64      // bytes/sec
	LastSync       time.Time    // time of the last successfully synced item; zero if none yet
	Paused         bool         // every configured folder is paused
	RecentFiles    []RecentFile // most recent first
	ConflictCount  int          // conflicts observed so far
	Folders        []FolderInfo // snapshot of per-folder info
	PendingDevices int          // devices awaiting approval
}
|
||||
|
||||
// FolderInfo holds per-folder info for the menu.
type FolderInfo struct {
	ID    string // Syncthing folder ID
	Label string // display label; falls back to ID when unset in config
	Path  string // local filesystem path of the folder
	State string // "idle", "syncing", "scanning", "error", etc.; "unknown" until first status
}

// RecentFile records a recently synced file for the menu.
type RecentFile struct {
	Name      string    // base filename (no directory)
	Folder    string    // display label of the folder it synced in
	Timestamp time.Time // when the sync of this item finished
}
|
||||
|
||||
// stateFromFolders determines the aggregate icon state from folder states.
|
||||
func stateFromFolders(folders []FolderInfo, paused bool) icons.State {
|
||||
if paused {
|
||||
return icons.StatePaused
|
||||
}
|
||||
|
||||
hasError := false
|
||||
hasSyncing := false
|
||||
|
||||
for _, f := range folders {
|
||||
switch f.State {
|
||||
case "error":
|
||||
hasError = true
|
||||
case "syncing", "sync-preparing", "scanning", "sync-waiting", "scan-waiting":
|
||||
hasSyncing = true
|
||||
}
|
||||
}
|
||||
|
||||
if hasError {
|
||||
return icons.StateError
|
||||
}
|
||||
if hasSyncing {
|
||||
return icons.StateSyncing
|
||||
}
|
||||
return icons.StateIdle
|
||||
}
|
||||
Reference in New Issue
Block a user