[TASK] Make yanic more modular for multiple databases (#33)
parent 5502c0affe
commit f135249795
@@ -29,6 +29,10 @@ export PATH=$PATH:$GOPATH/bin
go get -v -u github.com/FreifunkBremen/yanic/cmd/...
```

#### Work with other databases

If you would like a database solution other than InfluxDB,
you are welcome to create another subpackage of `database` in your fork, following the `logging` one as an example.

### Configure
```sh
cp /opt/go/src/github.com/FreifunkBremen/yanic/config_example.toml /etc/yanic.conf
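To make the extension point described under "Work with other databases" concrete, here is a rough sketch of what such a subpackage could look like against the `database.Connection` interface and the `database.AddDatabaseType` registration introduced by this commit. The package name `example` and the `enable` key are illustrative only; the `logging` subpackage further down in this diff is the real reference implementation.

```go
package example

import (
	"time"

	"github.com/FreifunkBremen/yanic/database"
	"github.com/FreifunkBremen/yanic/runtime"
)

// Connection is a minimal database.Connection implementation.
type Connection struct {
	database.Connection
}

func init() {
	// Self-registration; database/all picks this up via a blank import.
	database.AddDatabaseType("example", Connect)
}

// Connect receives the raw [[database.connection.example]] table from the config.
func Connect(configuration interface{}) (database.Connection, error) {
	config := configuration.(map[string]interface{})
	if !config["enable"].(bool) {
		// Returning nil, nil makes database/all skip this connection.
		return nil, nil
	}
	return &Connection{}, nil
}

func (conn *Connection) AddNode(nodeID string, node *runtime.Node)                {}
func (conn *Connection) AddStatistics(stats *runtime.GlobalStats, time time.Time) {}
func (conn *Connection) DeleteNode(deleteAfter time.Duration)                     {}
func (conn *Connection) Close()                                                   {}
```

A fork would then activate the new adapter with a blank import next to the influxdb and logging entries in `database/all/internal.go`.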
@@ -6,8 +6,8 @@ import (
    "os"
    "time"

    "github.com/FreifunkBremen/yanic/models"
    "github.com/FreifunkBremen/yanic/respond"
    "github.com/FreifunkBremen/yanic/runtime"
)

// Usage: respond-query wlp4s0 "[fe80::eade:27ff:dead:beef%wlp4s0]:1001"

@@ -17,7 +17,7 @@ func main() {

    log.Printf("Sending request address=%s iface=%s", dstAddress, iface)

    nodes := models.NewNodes(&models.Config{})
    nodes := runtime.NewNodes(&runtime.Config{})

    collector := respond.NewCollector(nil, nodes, iface, 0)
    collector.SendPacket(net.ParseIP(dstAddress))
@@ -8,18 +8,20 @@ import (
    "syscall"

    "github.com/FreifunkBremen/yanic/database"
    "github.com/FreifunkBremen/yanic/models"
    "github.com/FreifunkBremen/yanic/database/all"
    "github.com/FreifunkBremen/yanic/meshviewer"
    "github.com/FreifunkBremen/yanic/respond"
    "github.com/FreifunkBremen/yanic/rrd"
    "github.com/FreifunkBremen/yanic/runtime"
    "github.com/FreifunkBremen/yanic/webserver"
)

var (
    configFile  string
    config      *models.Config
    config      *runtime.Config
    collector   *respond.Collector
    db          *database.DB
    nodes       *models.Nodes
    connections database.Connection
    nodes       *runtime.Nodes
)

func main() {

@@ -33,24 +35,31 @@ func main() {
    if !timestamps {
        log.SetFlags(0)
    }
    log.Println("Yanic say hello")

    config = models.ReadConfigFile(configFile)
    config, err := runtime.ReadConfigFile(configFile)
    if err != nil {
        panic(err)
    }

    if config.Influxdb.Enable {
    db = database.New(config)
    defer db.Close()
    connections, err = all.Connect(config.Database.Connection)
    if err != nil {
        panic(err)
    }
    database.Start(connections, config)
    defer database.Close(connections)

    if importPath != "" {
    if connections != nil && importPath != "" {
        importRRD(importPath)
        return
    }
    }

    nodes = models.NewNodes(config)
    nodes = runtime.NewNodes(config)
    nodes.Start()
    meshviewer.Start(config, nodes)

    if config.Respondd.Enable {
        collector = respond.NewCollector(db, nodes, config.Respondd.Interface, config.Respondd.Port)
        collector = respond.NewCollector(connections, nodes, config.Respondd.Interface, config.Respondd.Port)
        collector.Start(config.Respondd.CollectInterval.Duration)
        defer collector.Close()
    }

@@ -71,12 +80,10 @@ func main() {
func importRRD(path string) {
    log.Println("importing RRD from", path)
    for ds := range rrd.Read(path) {
        db.AddPoint(
            database.MeasurementGlobal,
            nil,
            map[string]interface{}{
                "nodes":         uint32(ds.Nodes),
                "clients.total": uint32(ds.Clients),
        connections.AddStatistics(
            &runtime.GlobalStats{
                Nodes:   uint32(ds.Nodes),
                Clients: uint32(ds.Clients),
            },
            ds.Time,
        )
@@ -18,20 +18,9 @@ bind = "127.0.0.1:8080"
webroot = "/var/www/html/meshviewer"


[nodes]
enable = true
# structure of nodes.json to support
# version 1 is to support legacy meshviewer (which are in master branch)
# i.e. https://github.com/ffnord/meshviewer/tree/master
# version 2 is to support new version of meshviewer (which are in legacy develop branch or newer)
# i.e. https://github.com/ffnord/meshviewer/tree/dev
# https://github.com/ffrgb/meshviewer/tree/develop
nodes_version = 2
# path where to store nodes.json
nodes_path = "/var/www/html/meshviewer/data/nodes.json"
# path where to store graph.json
graph_path = "/var/www/html/meshviewer/data/graph.json"

# state-version of nodes.json to store cached data,
# this is the directly collected respondd data
state_path = "/var/lib/collector/state.json"

@@ -46,20 +35,39 @@ offline_after = "10m"
prune_after = "7d"


[meshviewer]
# structure of nodes.json to support
# version 1 is to support legacy meshviewer (which are in master branch)
# i.e. https://github.com/ffnord/meshviewer/tree/master
# version 2 is to support new version of meshviewer (which are in legacy develop branch or newer)
# i.e. https://github.com/ffnord/meshviewer/tree/dev
# https://github.com/ffrgb/meshviewer/tree/develop
version = 2
# path where to store nodes.json
nodes_path = "/var/www/html/meshviewer/data/nodes.json"
# path where to store graph.json
graph_path = "/var/www/html/meshviewer/data/graph.json"

[database]
# cleaning data of measurement node
# which is older than 7d
delete_after = "7d"
# how often to run the cleaning
delete_interval = "1h"

# Save collected data to InfluxDB
# there would be the following measurements:
# node: store node specific data, i.e. clients, memory, airtime
# global: store global data, i.e. count of clients and nodes
# firmware: store count of nodes tagged with firmware
# model: store count of nodes tagged with hardware model
[influxdb]
[[database.connection.influxdb]]
enable = false
address = "http://localhost:8086"
database = "ffhb"
username = ""
password = ""
# cleaning data of measurement node
# which is older than 7d
delete_after = "7d"
# how often to run the cleaning
delete_interval = "1h"

[[database.connection.logging]]
enable = false
path = "/var/log/yanic.log"
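As a hedged illustration (not part of the commit itself): each `[[database.connection.<name>]]` array of tables above is decoded into `Config.Database.Connection`, a `map[string][]interface{}` keyed by the adapter name, which `database/all.Connect` then matches against the adapters registered via `database.AddDatabaseType`. A minimal standalone sketch of that decoding, using the same BurntSushi/toml dependency the commit switches to:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// Mirrors the relevant part of runtime.Config from this commit.
type config struct {
	Database struct {
		Connection map[string][]interface{}
	}
}

func main() {
	raw := []byte(`
[[database.connection.influxdb]]
enable  = false
address = "http://localhost:8086"

[[database.connection.logging]]
enable = false
path   = "/var/log/yanic.log"
`)
	var c config
	if err := toml.Unmarshal(raw, &c); err != nil {
		panic(err)
	}
	// The map keys ("influxdb", "logging") are what database/all matches
	// against the adapter names registered via database.AddDatabaseType.
	for name, conns := range c.Database.Connection {
		fmt.Printf("%s: %d connection(s) configured\n", name, len(conns))
	}
}
```

The commit's own `runtime/config_test.go` further down asserts exactly this shape: one `influxdb` entry whose `database` key equals `"ffhb"`.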
@@ -0,0 +1,55 @@
package all

import (
    "time"

    "github.com/FreifunkBremen/yanic/database"
    "github.com/FreifunkBremen/yanic/runtime"
)

type Connection struct {
    database.Connection
    list []database.Connection
}

func Connect(configuration interface{}) (database.Connection, error) {
    var list []database.Connection
    allConnection := configuration.(map[string][]interface{})
    for dbType, conn := range database.Adapters {
        dbConfigs := allConnection[dbType]
        for _, config := range dbConfigs {
            connected, err := conn(config)
            if err != nil {
                return nil, err
            }
            if connected == nil {
                continue
            }
            list = append(list, connected)
        }
    }
    return &Connection{list: list}, nil
}
func (conn *Connection) AddNode(nodeID string, node *runtime.Node) {
    for _, item := range conn.list {
        item.AddNode(nodeID, node)
    }
}

func (conn *Connection) AddStatistics(stats *runtime.GlobalStats, time time.Time) {
    for _, item := range conn.list {
        item.AddStatistics(stats, time)
    }
}

func (conn *Connection) DeleteNode(deleteAfter time.Duration) {
    for _, item := range conn.list {
        item.DeleteNode(deleteAfter)
    }
}

func (conn *Connection) Close() {
    for _, item := range conn.list {
        item.Close()
    }
}
@@ -0,0 +1,6 @@
package all

import (
    _ "github.com/FreifunkBremen/yanic/database/influxdb"
    _ "github.com/FreifunkBremen/yanic/database/logging"
)
@@ -1,165 +1,31 @@
package database

import (
    "fmt"
    "log"
    "sync"
    "time"

    "github.com/influxdata/influxdb/client/v2"
    imodels "github.com/influxdata/influxdb/models"

    "github.com/FreifunkBremen/yanic/models"
    "github.com/FreifunkBremen/yanic/runtime"
)

const (
    MeasurementNode     = "node"     // Measurement for per-node statistics
    MeasurementGlobal   = "global"   // Measurement for summarized global statistics
    MeasurementFirmware = "firmware" // Measurement for firmware statistics
    MeasurementModel    = "model"    // Measurement for model statistics
    batchMaxSize        = 500
    batchTimeout        = 5 * time.Second
)
// Connection interface to use for implementation in e.g. influxdb
type Connection interface {
    // AddNode data for a single node
    AddNode(nodeID string, node *runtime.Node)
    AddStatistics(stats *runtime.GlobalStats, time time.Time)

type DB struct {
    config *models.Config
    client client.Client
    points chan *client.Point
    wg     sync.WaitGroup
    quit   chan struct{}
    DeleteNode(deleteAfter time.Duration)

    Close()
}

func New(config *models.Config) *DB {
    // Make client
    c, err := client.NewHTTPClient(client.HTTPConfig{
        Addr:     config.Influxdb.Address,
        Username: config.Influxdb.Username,
        Password: config.Influxdb.Password,
    })
// Connect function with config to get DB connection interface
type Connect func(config interface{}) (Connection, error)

    if err != nil {
        panic(err)
    }
/*
 * for selfbinding in use of the package all
 */

    db := &DB{
        config: config,
        client: c,
        points: make(chan *client.Point, 1000),
        quit:   make(chan struct{}),
    }
var Adapters = map[string]Connect{}

    db.wg.Add(1)
    go db.addWorker()
    go db.deleteWorker()

    return db
}

func (db *DB) DeletePoints() {
    query := fmt.Sprintf("delete from %s where time < now() - %ds", MeasurementNode, db.config.Influxdb.DeleteAfter.Duration/time.Second)
    db.client.Query(client.NewQuery(query, db.config.Influxdb.Database, "m"))
}

func (db *DB) AddPoint(name string, tags imodels.Tags, fields imodels.Fields, time time.Time) {
    point, err := client.NewPoint(name, tags.Map(), fields, time)
    if err != nil {
        panic(err)
    }
    db.points <- point
}

// Saves the values of a CounterMap in the database.
// The key are used as 'value' tag.
// The value is used as 'counter' field.
func (db *DB) AddCounterMap(name string, m models.CounterMap) {
    now := time.Now()
    for key, count := range m {
        db.AddPoint(
            name,
            imodels.Tags{
                imodels.Tag{Key: []byte("value"), Value: []byte(key)},
            },
            imodels.Fields{"count": count},
            now,
        )
    }
}

// Add data for a single node
func (db *DB) Add(nodeID string, node *models.Node) {
    tags, fields := node.ToInflux()
    db.AddPoint(MeasurementNode, tags, fields, time.Now())
}

// Close all connection and clean up
func (db *DB) Close() {
    close(db.quit)
    close(db.points)
    db.wg.Wait()
    db.client.Close()
}

// prunes node-specific data periodically
func (db *DB) deleteWorker() {
    ticker := time.NewTicker(db.config.Influxdb.DeleteInterval.Duration)
    for {
        select {
        case <-ticker.C:
            db.DeletePoints()
        case <-db.quit:
            ticker.Stop()
            return
        }
    }
}

// stores data points in batches into the influxdb
func (db *DB) addWorker() {
    bpConfig := client.BatchPointsConfig{
        Database:  db.config.Influxdb.Database,
        Precision: "m",
    }

    var bp client.BatchPoints
    var err error
    var writeNow, closed bool
    timer := time.NewTimer(batchTimeout)

    for !closed {
        // wait for new points
        select {
        case point, ok := <-db.points:
            if ok {
                if bp == nil {
                    // create new batch
                    timer.Reset(batchTimeout)
                    if bp, err = client.NewBatchPoints(bpConfig); err != nil {
                        log.Fatal(err)
                    }
                }
                bp.AddPoint(point)
            } else {
                closed = true
            }
        case <-timer.C:
            if bp == nil {
                timer.Reset(batchTimeout)
            } else {
                writeNow = true
            }
        }

        // write batch now?
        if bp != nil && (writeNow || closed || len(bp.Points()) >= batchMaxSize) {
            log.Println("saving", len(bp.Points()), "points")

            if err = db.client.Write(bp); err != nil {
                log.Fatal(err)
            }
            writeNow = false
            bp = nil
        }
    }
    timer.Stop()
    db.wg.Done()
func AddDatabaseType(name string, n Connect) {
    Adapters[name] = n
}
@@ -0,0 +1,182 @@
package influxdb

import (
    "fmt"
    "log"
    "sync"
    "time"

    "github.com/influxdata/influxdb/client/v2"
    "github.com/influxdata/influxdb/models"

    "github.com/FreifunkBremen/yanic/database"
    "github.com/FreifunkBremen/yanic/runtime"
)

const (
    MeasurementNode            = "node"     // Measurement for per-node statistics
    MeasurementGlobal          = "global"   // Measurement for summarized global statistics
    CounterMeasurementFirmware = "firmware" // Measurement for firmware statistics
    CounterMeasurementModel    = "model"    // Measurement for model statistics
    batchMaxSize               = 500
    batchTimeout               = 5 * time.Second
)

type Connection struct {
    database.Connection
    config Config
    client client.Client
    points chan *client.Point
    wg     sync.WaitGroup
}

type Config map[string]interface{}

func (c Config) Enable() bool {
    return c["enable"].(bool)
}
func (c Config) Address() string {
    return c["address"].(string)
}
func (c Config) Database() string {
    return c["database"].(string)
}
func (c Config) Username() string {
    return c["username"].(string)
}
func (c Config) Password() string {
    return c["password"].(string)
}

func init() {
    database.AddDatabaseType("influxdb", Connect)
}
func Connect(configuration interface{}) (database.Connection, error) {
    var config Config
    config = configuration.(map[string]interface{})
    if !config.Enable() {
        return nil, nil
    }
    // Make client
    c, err := client.NewHTTPClient(client.HTTPConfig{
        Addr:     config.Address(),
        Username: config.Username(),
        Password: config.Password(),
    })

    if err != nil {
        return nil, err
    }

    db := &Connection{
        config: config,
        client: c,
        points: make(chan *client.Point, 1000),
    }

    db.wg.Add(1)
    go db.addWorker()

    return db, nil
}

func (conn *Connection) DeleteNode(deleteAfter time.Duration) {
    query := fmt.Sprintf("delete from %s where time < now() - %ds", MeasurementNode, deleteAfter/time.Second)
    conn.client.Query(client.NewQuery(query, conn.config.Database(), "m"))
}

func (conn *Connection) addPoint(name string, tags models.Tags, fields models.Fields, time time.Time) {
    point, err := client.NewPoint(name, tags.Map(), fields, time)
    if err != nil {
        panic(err)
    }
    conn.points <- point
}

// Saves the values of a CounterMap in the database.
// The key are used as 'value' tag.
// The value is used as 'counter' field.
func (conn *Connection) addCounterMap(name string, m runtime.CounterMap) {
    now := time.Now()
    for key, count := range m {
        conn.addPoint(
            name,
            models.Tags{
                models.Tag{Key: []byte("value"), Value: []byte(key)},
            },
            models.Fields{"count": count},
            now,
        )
    }
}

// AddStatistics implementation of database
func (conn *Connection) AddStatistics(stats *runtime.GlobalStats, time time.Time) {
    conn.addPoint(MeasurementGlobal, nil, GlobalStatsFields(stats), time)
    conn.addCounterMap(CounterMeasurementModel, stats.Models)
    conn.addCounterMap(CounterMeasurementFirmware, stats.Firmwares)
}

// AddNode implementation of database
func (conn *Connection) AddNode(nodeID string, node *runtime.Node) {
    tags, fields := nodeToInflux(node)
    conn.addPoint(MeasurementNode, tags, fields, time.Now())
}

// Close all connection and clean up
func (conn *Connection) Close() {
    close(conn.points)
    conn.wg.Wait()
    conn.client.Close()
}

// stores data points in batches into the influxdb
func (conn *Connection) addWorker() {
    bpConfig := client.BatchPointsConfig{
        Database:  conn.config.Database(),
        Precision: "m",
    }

    var bp client.BatchPoints
    var err error
    var writeNow, closed bool
    timer := time.NewTimer(batchTimeout)

    for !closed {
        // wait for new points
        select {
        case point, ok := <-conn.points:
            if ok {
                if bp == nil {
                    // create new batch
                    timer.Reset(batchTimeout)
                    if bp, err = client.NewBatchPoints(bpConfig); err != nil {
                        log.Fatal(err)
                    }
                }
                bp.AddPoint(point)
            } else {
                closed = true
            }
        case <-timer.C:
            if bp == nil {
                timer.Reset(batchTimeout)
            } else {
                writeNow = true
            }
        }

        // write batch now?
        if bp != nil && (writeNow || closed || len(bp.Points()) >= batchMaxSize) {
            log.Println("saving", len(bp.Points()), "points")

            if err = conn.client.Write(bp); err != nil {
                log.Fatal(err)
            }
            writeNow = false
            bp = nil
        }
    }
    timer.Stop()
    conn.wg.Done()
}
@@ -1,29 +1,15 @@
package models
package influxdb

import (
    "net"
    "strconv"

    imodels "github.com/influxdata/influxdb/models"
    models "github.com/influxdata/influxdb/models"

    "github.com/FreifunkBremen/yanic/data"
    "github.com/FreifunkBremen/yanic/jsontime"
    "github.com/FreifunkBremen/yanic/meshviewer"
    "github.com/FreifunkBremen/yanic/runtime"
)

// Node struct
type Node struct {
    Address    net.IP           `json:"address"` // the last known IP address
    Firstseen  jsontime.Time    `json:"firstseen"`
    Lastseen   jsontime.Time    `json:"lastseen"`
    Flags      meshviewer.Flags `json:"flags"`
    Statistics *data.Statistics `json:"statistics"`
    Nodeinfo   *data.NodeInfo   `json:"nodeinfo"`
    Neighbours *data.Neighbours `json:"-"`
}

// ToInflux Returns tags and fields for InfluxDB
func (node *Node) ToInflux() (tags imodels.Tags, fields imodels.Fields) {
// NodeToInflux Returns tags and fields for InfluxDB
func nodeToInflux(node *runtime.Node) (tags models.Tags, fields models.Fields) {
    stats := node.Statistics

    tags.SetString("nodeid", stats.NodeID)
@@ -1,4 +1,4 @@
package models
package influxdb

import (
    "testing"

@@ -6,12 +6,13 @@ import (
    "github.com/stretchr/testify/assert"

    "github.com/FreifunkBremen/yanic/data"
    "github.com/FreifunkBremen/yanic/runtime"
)

func TestToInflux(t *testing.T) {
    assert := assert.New(t)

    node := Node{
    node := &runtime.Node{
        Statistics: &data.Statistics{
            NodeID:      "foobar",
            LoadAverage: 0.5,

@@ -65,7 +66,7 @@ func TestToInflux(t *testing.T) {
        },
    }

    tags, fields := node.ToInflux()
    tags, fields := nodeToInflux(node)

    assert.Equal("foobar", tags.GetString("nodeid"))
    assert.Equal("nobody", tags.GetString("owner"))
@@ -0,0 +1,17 @@
package influxdb

import (
    "github.com/FreifunkBremen/yanic/runtime"
)

// GlobalStatsFields returns fields for InfluxDB
func GlobalStatsFields(stats *runtime.GlobalStats) map[string]interface{} {
    return map[string]interface{}{
        "nodes":          stats.Nodes,
        "gateways":       stats.Gateways,
        "clients.total":  stats.Clients,
        "clients.wifi":   stats.ClientsWifi,
        "clients.wifi24": stats.ClientsWifi24,
        "clients.wifi5":  stats.ClientsWifi5,
    }
}
@@ -0,0 +1,63 @@
package influxdb

import (
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/FreifunkBremen/yanic/data"
    "github.com/FreifunkBremen/yanic/runtime"
)

func TestGlobalStats(t *testing.T) {
    stats := runtime.NewGlobalStats(createTestNodes())

    assert := assert.New(t)
    fields := GlobalStatsFields(stats)

    // check fields
    assert.EqualValues(3, fields["nodes"])
}

func createTestNodes() *runtime.Nodes {
    nodes := runtime.NewNodes(&runtime.Config{})

    nodeData := &data.ResponseData{
        Statistics: &data.Statistics{
            Clients: data.Clients{
                Total: 23,
            },
        },
        NodeInfo: &data.NodeInfo{
            Hardware: data.Hardware{
                Model: "TP-Link 841",
            },
        },
    }
    nodeData.NodeInfo.Software.Firmware.Release = "2016.1.6+entenhausen1"
    nodes.Update("abcdef012345", nodeData)

    nodes.Update("112233445566", &data.ResponseData{
        Statistics: &data.Statistics{
            Clients: data.Clients{
                Total: 2,
            },
        },
        NodeInfo: &data.NodeInfo{
            Hardware: data.Hardware{
                Model: "TP-Link 841",
            },
        },
    })

    nodes.Update("0xdeadbeef0x", &data.ResponseData{
        NodeInfo: &data.NodeInfo{
            VPN: true,
            Hardware: data.Hardware{
                Model: "Xeon Multi-Core",
            },
        },
    })

    return nodes
}
@@ -0,0 +1,40 @@
package database

import (
    "time"

    "github.com/FreifunkBremen/yanic/runtime"
)

var quit chan struct{}

// Start workers of database
// WARNING: Do not override this function
// you should use New()
func Start(conn Connection, config *runtime.Config) {
    quit = make(chan struct{})
    go deleteWorker(conn, config.Database.DeleteInterval.Duration, config.Database.DeleteAfter.Duration)
}

func Close(conn Connection) {
    if quit != nil {
        close(quit)
    }
    if conn != nil {
        conn.Close()
    }
}

// prunes node-specific data periodically
func deleteWorker(conn Connection, deleteInterval time.Duration, deleteAfter time.Duration) {
    ticker := time.NewTicker(deleteInterval)
    for {
        select {
        case <-ticker.C:
            conn.DeleteNode(deleteAfter)
        case <-quit:
            ticker.Stop()
            return
        }
    }
}
@@ -0,0 +1,71 @@
package logging

/**
 * This database type is just for,
 * - debugging without a influxconn
 * - example for other developers for new databases
 */
import (
    "fmt"
    "log"
    "os"
    "time"

    "github.com/FreifunkBremen/yanic/database"
    "github.com/FreifunkBremen/yanic/runtime"
)

type Connection struct {
    database.Connection
    config Config
    file   *os.File
}

type Config map[string]interface{}

func (c Config) Enable() bool {
    return c["enable"].(bool)
}
func (c Config) Path() string {
    return c["path"].(string)
}

func init() {
    database.AddDatabaseType("logging", Connect)
}

func Connect(configuration interface{}) (database.Connection, error) {
    var config Config
    config = configuration.(map[string]interface{})
    if !config.Enable() {
        return nil, nil
    }

    file, err := os.OpenFile(config.Path(), os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
    if err != nil {
        return nil, err
    }
    return &Connection{config: config, file: file}, nil
}

func (conn *Connection) AddNode(nodeID string, node *runtime.Node) {
    conn.log("AddNode: [", nodeID, "] clients: ", node.Statistics.Clients.Total)
}

func (conn *Connection) AddStatistics(stats *runtime.GlobalStats, time time.Time) {
    conn.log("AddStatistics: [", time.String(), "] nodes: ", stats.Nodes, ", clients: ", stats.Clients, " models: ", len(stats.Models))
}

func (conn *Connection) DeleteNode(deleteAfter time.Duration) {
    conn.log("DeleteNode")
}

func (conn *Connection) Close() {
    conn.log("Close")
    conn.file.Close()
}

func (conn *Connection) log(v ...interface{}) {
    log.Println(v)
    conn.file.WriteString(fmt.Sprintln("[", time.Now().String(), "]", v))
}
@@ -1,8 +1,10 @@
package models
package meshviewer

import (
    "fmt"
    "strings"

    "github.com/FreifunkBremen/yanic/runtime"
)

// Graph a struct for all links between the nodes

@@ -40,7 +42,7 @@ type graphBuilder struct {
}

// BuildGraph transform from nodes (Neighbours) to Graph
func (nodes *Nodes) BuildGraph() *Graph {
func BuildGraph(nodes *runtime.Nodes) *Graph {
    builder := &graphBuilder{
        macToID: make(map[string]string),
        idToMac: make(map[string]string),

@@ -56,7 +58,7 @@ func (nodes *Nodes) BuildGraph() *Graph {
    return graph
}

func (builder *graphBuilder) readNodes(nodes map[string]*Node) {
func (builder *graphBuilder) readNodes(nodes map[string]*runtime.Node) {
    // Fill mac->id map
    for sourceID, node := range nodes {
        if nodeinfo := node.Nodeinfo; nodeinfo != nil {

@@ -90,7 +92,7 @@ func (builder *graphBuilder) readNodes(nodes map[string]*Node) {

    // Add links
    for sourceID, node := range nodes {
        if node.Flags.Online {
        if node.Online {
            if neighbours := node.Neighbours; neighbours != nil {
                // Batman neighbours
                for _, batadvNeighbours := range neighbours.Batadv {
@@ -1,4 +1,4 @@
package models
package meshviewer

import (
    "encoding/json"

@@ -8,6 +8,7 @@ import (
    "github.com/stretchr/testify/assert"

    "github.com/FreifunkBremen/yanic/data"
    "github.com/FreifunkBremen/yanic/runtime"
)

type TestNode struct {

@@ -19,7 +20,7 @@ func TestGenerateGraph(t *testing.T) {
    assert := assert.New(t)
    nodes := testGetNodesByFile("node1.json", "node2.json", "node3.json", "node4.json")

    graph := nodes.BuildGraph()
    graph := BuildGraph(nodes)
    assert.NotNil(graph)
    assert.Equal(1, graph.Version, "Wrong Version")
    assert.NotNil(graph.Batadv, "no Batadv")

@@ -30,10 +31,10 @@ func TestGenerateGraph(t *testing.T) {
    // TODO more tests required
}

func testGetNodesByFile(files ...string) *Nodes {
func testGetNodesByFile(files ...string) *runtime.Nodes {

    nodes := &Nodes{
        List: make(map[string]*Node),
    nodes := &runtime.Nodes{
        List: make(map[string]*runtime.Node),
    }

    for _, file := range files {

@@ -47,17 +48,17 @@ func testGetNodesByFile(files ...string) *Nodes {
    return nodes
}

func testGetNodeByFile(filename string) *Node {
func testGetNodeByFile(filename string) *runtime.Node {
    testnode := &TestNode{}
    testfile(filename, testnode)
    return &Node{
    return &runtime.Node{
        Nodeinfo:   testnode.Nodeinfo,
        Neighbours: testnode.Neighbours,
    }
}

func testfile(name string, obj interface{}) {
    file, err := ioutil.ReadFile("testdata/" + name)
    file, err := ioutil.ReadFile("../runtime/testdata/" + name)
    if err != nil {
        panic(err)
    }
@@ -21,22 +21,6 @@ type Flags struct {
    Gateway bool `json:"gateway"`
}

// NodesV1 struct, to support legacy meshviewer (which are in master branch)
// i.e. https://github.com/ffnord/meshviewer/tree/master
type NodesV1 struct {
    Version   int              `json:"version"`
    Timestamp jsontime.Time    `json:"timestamp"` // Timestamp of the generation
    List      map[string]*Node `json:"nodes"`     // the current nodemap, indexed by node ID
}

// NodesV2 struct, to support new version of meshviewer (which are in legacy develop branch or newer)
// i.e. https://github.com/ffnord/meshviewer/tree/dev or https://github.com/ffrgb/meshviewer/tree/develop
type NodesV2 struct {
    Version   int           `json:"version"`
    Timestamp jsontime.Time `json:"timestamp"` // Timestamp of the generation
    List      []*Node       `json:"nodes"`     // the current nodemap, as array
}

// Statistics a meshviewer spezifisch struct, diffrent from respondd
type Statistics struct {
    NodeID string `json:"node_id"`
@@ -0,0 +1,117 @@
package meshviewer

import (
    "log"
    "time"

    "github.com/FreifunkBremen/yanic/jsontime"
    "github.com/FreifunkBremen/yanic/runtime"
)

// NodesV1 struct, to support legacy meshviewer (which are in master branch)
// i.e. https://github.com/ffnord/meshviewer/tree/master
type NodesV1 struct {
    Version   int              `json:"version"`
    Timestamp jsontime.Time    `json:"timestamp"` // Timestamp of the generation
    List      map[string]*Node `json:"nodes"`     // the current nodemap, indexed by node ID
}

// NodesV2 struct, to support new version of meshviewer (which are in legacy develop branch or newer)
// i.e. https://github.com/ffnord/meshviewer/tree/dev or https://github.com/ffrgb/meshviewer/tree/develop
type NodesV2 struct {
    Version   int           `json:"version"`
    Timestamp jsontime.Time `json:"timestamp"` // Timestamp of the generation
    List      []*Node       `json:"nodes"`     // the current nodemap, as array
}

// GetNodesV1 transform data to legacy meshviewer
func GetNodesV1(nodes *runtime.Nodes) *NodesV1 {
    meshviewerNodes := &NodesV1{
        Version:   1,
        List:      make(map[string]*Node),
        Timestamp: jsontime.Now(),
    }

    for nodeID := range nodes.List {
        nodeOrigin := nodes.List[nodeID]

        if nodeOrigin.Statistics == nil {
            continue
        }

        node := &Node{
            Firstseen: nodeOrigin.Firstseen,
            Lastseen:  nodeOrigin.Lastseen,
            Flags: Flags{
                Online:  nodeOrigin.Online,
                Gateway: nodeOrigin.Gateway,
            },
            Nodeinfo: nodeOrigin.Nodeinfo,
        }
        node.Statistics = NewStatistics(nodeOrigin.Statistics)
        meshviewerNodes.List[nodeID] = node
    }
    return meshviewerNodes
}

// GetNodesV2 transform data to modern meshviewers
func GetNodesV2(nodes *runtime.Nodes) *NodesV2 {
    meshviewerNodes := &NodesV2{
        Version:   2,
        Timestamp: jsontime.Now(),
    }

    for nodeID := range nodes.List {
        nodeOrigin := nodes.List[nodeID]
        if nodeOrigin.Statistics == nil {
            continue
        }
        node := &Node{
            Firstseen: nodeOrigin.Firstseen,
            Lastseen:  nodeOrigin.Lastseen,
            Flags: Flags{
                Online:  nodeOrigin.Online,
                Gateway: nodeOrigin.Gateway,
            },
            Nodeinfo: nodeOrigin.Nodeinfo,
        }
        node.Statistics = NewStatistics(nodeOrigin.Statistics)
        meshviewerNodes.List = append(meshviewerNodes.List, node)
    }
    return meshviewerNodes
}

// Start all services to manage Nodes
func Start(config *runtime.Config, nodes *runtime.Nodes) {
    go worker(config, nodes)
}

// Periodically saves the cached DB to json file
func worker(config *runtime.Config, nodes *runtime.Nodes) {
    c := time.Tick(config.Nodes.SaveInterval.Duration)

    for range c {
        saveMeshviewer(config, nodes)
    }
}

func saveMeshviewer(config *runtime.Config, nodes *runtime.Nodes) {
    // Locking foo
    nodes.RLock()
    defer nodes.RUnlock()
    if path := config.Meshviewer.NodesPath; path != "" {
        version := config.Meshviewer.Version
        switch version {
        case 1:
            runtime.SaveJSON(GetNodesV1(nodes), path)
        case 2:
            runtime.SaveJSON(GetNodesV2(nodes), path)
        default:
            log.Panicf("invalid nodes version: %d", version)
        }
    }

    if path := config.Meshviewer.GraphPath; path != "" {
        runtime.SaveJSON(BuildGraph(nodes), path)
    }
}
@@ -0,0 +1,66 @@
package meshviewer

import (
    "testing"

    "github.com/stretchr/testify/assert"

    "github.com/FreifunkBremen/yanic/data"
    "github.com/FreifunkBremen/yanic/runtime"
)

func TestNodesV1(t *testing.T) {
    nodes := GetNodesV1(createTestNodes())

    assert := assert.New(t)
    assert.Len(nodes.List, 2)
}
func TestNodesV2(t *testing.T) {
    nodes := GetNodesV2(createTestNodes())

    assert := assert.New(t)
    assert.Len(nodes.List, 2)
}

func createTestNodes() *runtime.Nodes {
    nodes := runtime.NewNodes(&runtime.Config{})

    nodeData := &data.ResponseData{
        Statistics: &data.Statistics{
            Clients: data.Clients{
                Total: 23,
            },
        },
        NodeInfo: &data.NodeInfo{
            Hardware: data.Hardware{
                Model: "TP-Link 841",
            },
        },
    }
    nodeData.NodeInfo.Software.Firmware.Release = "2016.1.6+entenhausen1"
    nodes.Update("abcdef012345", nodeData)

    nodes.Update("112233445566", &data.ResponseData{
        Statistics: &data.Statistics{
            Clients: data.Clients{
                Total: 2,
            },
        },
        NodeInfo: &data.NodeInfo{
            Hardware: data.Hardware{
                Model: "TP-Link 841",
            },
        },
    })

    nodes.Update("0xdeadbeef0x", &data.ResponseData{
        NodeInfo: &data.NodeInfo{
            VPN: true,
            Hardware: data.Hardware{
                Model: "Xeon Multi-Core",
            },
        },
    })

    return nodes
}
@@ -1,56 +0,0 @@
package models

import (
    "io/ioutil"

    "github.com/influxdata/toml"
)

//Config the config File of this daemon
type Config struct {
    Respondd struct {
        Enable          bool
        Interface       string
        Port            int
        CollectInterval Duration
    }
    Webserver struct {
        Enable  bool
        Bind    string
        Webroot string
    }
    Nodes struct {
        Enable       bool
        NodesVersion int
        NodesPath    string
        GraphPath    string
        StatePath    string
        SaveInterval Duration // Save nodes periodically
        OfflineAfter Duration // Set node to offline if not seen within this period
        PruneAfter   Duration // Remove nodes after n days of inactivity
    }
    Influxdb struct {
        Enable         bool
        Address        string
        Database       string
        Username       string
        Password       string
        DeleteInterval Duration // Delete stats of nodes every n minutes
        DeleteAfter    Duration // Delete stats of nodes till now-deletetill n minutes
    }
}

// ReadConfigFile reads a config model from path of a yml file
func ReadConfigFile(path string) *Config {
    config := &Config{}
    file, err := ioutil.ReadFile(path)
    if err != nil {
        panic(err)
    }

    if err := toml.Unmarshal(file, config); err != nil {
        panic(err)
    }

    return config
}
@@ -1,23 +0,0 @@
package models

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

func TestReadConfig(t *testing.T) {
    assert := assert.New(t)

    config := ReadConfigFile("../config_example.toml")
    assert.NotNil(config)

    assert.True(config.Respondd.Enable)
    assert.Equal("eth0", config.Respondd.Interface)
    assert.Equal(time.Minute, config.Respondd.CollectInterval.Duration)

    assert.Equal(2, config.Nodes.NodesVersion)
    assert.Equal("/var/www/html/meshviewer/data/nodes.json", config.Nodes.NodesPath)
    assert.Equal(time.Hour*24*7, config.Nodes.PruneAfter.Duration)
}
@@ -4,15 +4,15 @@ import (
    "bytes"
    "compress/flate"
    "encoding/json"
    "fmt"
    "log"
    "net"
    "time"
    "fmt"

    "github.com/FreifunkBremen/yanic/data"
    "github.com/FreifunkBremen/yanic/database"
    "github.com/FreifunkBremen/yanic/jsontime"
    "github.com/FreifunkBremen/yanic/models"
    "github.com/FreifunkBremen/yanic/runtime"
)

// Collector for a specificle respond messages

@@ -20,14 +20,14 @@ type Collector struct {
    connection *net.UDPConn   // UDP socket
    queue      chan *Response // received responses
    iface      string
    db         *database.DB
    nodes      *models.Nodes
    db         database.Connection
    nodes      *runtime.Nodes
    interval   time.Duration // Interval for multicast packets
    stop       chan interface{}
}

// NewCollector creates a Collector struct
func NewCollector(db *database.DB, nodes *models.Nodes, iface string, port int) *Collector {
func NewCollector(db database.Connection, nodes *runtime.Nodes, iface string, port int) *Collector {
    linkLocalAddr, err := getLinkLocalAddr(iface)
    if err != nil {
        log.Panic(err)

@@ -125,7 +125,7 @@ func (coll *Collector) sendUnicasts(seenBefore jsontime.Time) {
    seenAfter := seenBefore.Add(-time.Minute * 10)

    // Select online nodes that has not been seen recently
    nodes := coll.nodes.Select(func(n *models.Node) bool {
    nodes := coll.nodes.Select(func(n *runtime.Node) bool {
        return n.Lastseen.After(seenAfter) && n.Lastseen.Before(seenBefore) && n.Address != nil
    })

@@ -210,7 +210,7 @@ func (coll *Collector) saveResponse(addr net.UDPAddr, res *data.ResponseData) {

    // Store statistics in InfluxDB
    if coll.db != nil && node.Statistics != nil {
        coll.db.Add(nodeID, node)
        coll.db.AddNode(nodeID, node)
    }
}

@@ -249,9 +249,7 @@ func (coll *Collector) globalStatsWorker() {

// saves global statistics
func (coll *Collector) saveGlobalStats() {
    stats := models.NewGlobalStats(coll.nodes)
    stats := runtime.NewGlobalStats(coll.nodes)

    coll.db.AddPoint(database.MeasurementGlobal, nil, stats.Fields(), time.Now())
    coll.db.AddCounterMap(database.MeasurementFirmware, stats.Firmwares)
    coll.db.AddCounterMap(database.MeasurementModel, stats.Models)
    coll.db.AddStatistics(stats, time.Now())
}
@@ -0,0 +1,56 @@
package runtime

import (
    "io/ioutil"

    "github.com/BurntSushi/toml"
)

//Config the config File of this daemon
type Config struct {
    Respondd struct {
        Enable          bool     `toml:"enable"`
        Interface       string   `toml:"interface"`
        Port            int      `toml:"port"`
        CollectInterval Duration `toml:"collect_interval"`
    }
    Webserver struct {
        Enable  bool   `toml:"enable"`
        Bind    string `toml:"bind"`
        Webroot string `toml:"webroot"`
    }
    Nodes struct {
        Enable       bool     `toml:"enable"`
        StatePath    string   `toml:"state_path"`
        SaveInterval Duration `toml:"save_interval"` // Save nodes periodically
        OfflineAfter Duration `toml:"offline_after"` // Set node to offline if not seen within this period
        PruneAfter   Duration `toml:"prune_after"`   // Remove nodes after n days of inactivity
    }
    Meshviewer struct {
        Version   int    `toml:"version"`
        NodesPath string `toml:"nodes_path"`
        GraphPath string `toml:"graph_path"`
    }
    Database struct {
        DeleteInterval Duration `toml:"delete_interval"` // Delete stats of nodes every n minutes
        DeleteAfter    Duration `toml:"delete_after"`    // Delete stats of nodes till now-deletetill n minutes
        Connection     map[string][]interface{}
    }
}

// ReadConfigFile reads a config model from path of a yml file
func ReadConfigFile(path string) (config *Config, err error) {
    config = &Config{}

    file, err := ioutil.ReadFile(path)
    if err != nil {
        panic(err)
    }

    err = toml.Unmarshal(file, config)
    if err != nil {
        panic(err)
    }

    return
}
@@ -0,0 +1,33 @@
package runtime

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
)

func TestReadConfig(t *testing.T) {
    assert := assert.New(t)

    config, err := ReadConfigFile("../config_example.toml")
    assert.NoError(err, "no error during reading")
    assert.NotNil(config)

    assert.True(config.Respondd.Enable)
    assert.Equal("eth0", config.Respondd.Interface)
    assert.Equal(time.Minute, config.Respondd.CollectInterval.Duration)

    assert.Equal(2, config.Meshviewer.Version)
    assert.Equal("/var/www/html/meshviewer/data/nodes.json", config.Meshviewer.NodesPath)

    assert.Equal(time.Hour*24*7, config.Nodes.PruneAfter.Duration)

    assert.Equal(time.Hour*24*7, config.Database.DeleteAfter.Duration)

    var influxdb map[string]interface{}
    dbs := config.Database.Connection["influxdb"]
    assert.Len(dbs, 1, "more influxdb are given")
    influxdb = dbs[0].(map[string]interface{})
    assert.Equal(influxdb["database"], "ffhb")
}
@@ -1,4 +1,4 @@
package models
package runtime

import (
    "fmt"

@@ -16,15 +16,21 @@ type Duration struct {
}

// UnmarshalTOML parses a duration string.
func (d *Duration) UnmarshalTOML(data []byte) error {

func (d *Duration) UnmarshalTOML(dataInterface interface{}) error {
    var data string
    switch dataInterface.(type) {
    case string:
        data = dataInterface.(string)
    default:
        return fmt.Errorf("invalid duration: \"%s\"", dataInterface)
    }
    // " + int + unit + "
    if len(data) < 4 {
        return fmt.Errorf("invalid duration: %s", data)
    if len(data) < 2 {
        return fmt.Errorf("invalid duration: \"%s\"", data)
    }

    unit := data[len(data)-2]
    value, err := strconv.Atoi(string(data[1 : len(data)-2]))
    unit := data[len(data)-1]
    value, err := strconv.Atoi(string(data[:len(data)-1]))
    if err != nil {
        return fmt.Errorf("unable to parse duration %s: %s", data, err)
    }
@@ -1,4 +1,4 @@
package models
package runtime

import (
    "testing"

@@ -34,7 +34,7 @@ func TestDuration(t *testing.T) {
    for _, test := range tests {

        d := Duration{}
        err := d.UnmarshalTOML([]byte("\"" + test.input + "\""))
        err := d.UnmarshalTOML(test.input)
        duration := d.Duration

        if test.err == "" {
@@ -0,0 +1,20 @@
package runtime

import (
    "net"

    "github.com/FreifunkBremen/yanic/data"
    "github.com/FreifunkBremen/yanic/jsontime"
)

// Node struct
type Node struct {
    Address    net.IP           `json:"address"` // the last known IP address
    Firstseen  jsontime.Time    `json:"firstseen"`
    Lastseen   jsontime.Time    `json:"lastseen"`
    Online     bool             `json:"online"`
    Gateway    bool             `json:"gateway"`
    Statistics *data.Statistics `json:"statistics"`
    Nodeinfo   *data.NodeInfo   `json:"nodeinfo"`
    Neighbours *data.Neighbours `json:"-"`
}
@@ -1,4 +1,4 @@
package models
package runtime

import (
    "encoding/json"

@@ -9,7 +9,6 @@ import (

    "github.com/FreifunkBremen/yanic/data"
    "github.com/FreifunkBremen/yanic/jsontime"
    "github.com/FreifunkBremen/yanic/meshviewer"
)

// Nodes struct: cache DB of Node's structs

@@ -54,7 +53,7 @@ func (nodes *Nodes) Update(nodeID string, res *data.ResponseData) *Node {
    nodes.Unlock()

    node.Lastseen = now
    node.Flags.Online = true
    node.Online = true

    // Update neighbours
    if val := res.Neighbours; val != nil {

@@ -64,7 +63,7 @@ func (nodes *Nodes) Update(nodeID string, res *data.ResponseData) *Node {
    // Update nodeinfo
    if val := res.NodeInfo; val != nil {
        node.Nodeinfo = val
        node.Flags.Gateway = val.VPN
        node.Gateway = val.VPN
    }

    // Update statistics

@@ -81,57 +80,6 @@ func (nodes *Nodes) Update(nodeID string, res *data.ResponseData) *Node {
    return node
}

// GetNodesV1 transform data to legacy meshviewer
func (nodes *Nodes) GetNodesV1() *meshviewer.NodesV1 {
    meshviewerNodes := &meshviewer.NodesV1{
        Version:   1,
        List:      make(map[string]*meshviewer.Node),
        Timestamp: jsontime.Now(),
    }

    for nodeID := range nodes.List {
        nodeOrigin := nodes.List[nodeID]

        if nodeOrigin.Statistics == nil {
            continue
        }

        node := &meshviewer.Node{
            Firstseen: nodeOrigin.Firstseen,
            Lastseen:  nodeOrigin.Lastseen,
            Flags:     nodeOrigin.Flags,
            Nodeinfo:  nodeOrigin.Nodeinfo,
        }
        node.Statistics = meshviewer.NewStatistics(nodeOrigin.Statistics)
        meshviewerNodes.List[nodeID] = node
    }
    return meshviewerNodes
}

// GetNodesV2 transform data to modern meshviewers
func (nodes *Nodes) GetNodesV2() *meshviewer.NodesV2 {
    meshviewerNodes := &meshviewer.NodesV2{
        Version:   2,
        Timestamp: jsontime.Now(),
    }

    for nodeID := range nodes.List {
        nodeOrigin := nodes.List[nodeID]
        if nodeOrigin.Statistics == nil {
            continue
        }
        node := &meshviewer.Node{
            Firstseen: nodeOrigin.Firstseen,
            Lastseen:  nodeOrigin.Lastseen,
            Flags:     nodeOrigin.Flags,
            Nodeinfo:  nodeOrigin.Nodeinfo,
        }
        node.Statistics = meshviewer.NewStatistics(nodeOrigin.Statistics)
        meshviewerNodes.List = append(meshviewerNodes.List, node)
    }
    return meshviewerNodes
}

// Select selects a list of nodes to be returned
func (nodes *Nodes) Select(f func(*Node) bool) []*Node {
    nodes.RLock()

@@ -180,7 +128,7 @@ func (nodes *Nodes) expire() {
            delete(nodes.List, id)
        } else if node.Lastseen.Before(offlineAfter) {
            // set to offline
            node.Flags.Online = false
            node.Online = false
        }
    }
}

@@ -205,26 +153,11 @@ func (nodes *Nodes) save() {
    defer nodes.RUnlock()

    // serialize nodes
    save(nodes, nodes.config.Nodes.StatePath)

    if path := nodes.config.Nodes.NodesPath; path != "" {
        version := nodes.config.Nodes.NodesVersion
        switch version {
        case 1:
            save(nodes.GetNodesV1(), path)
        case 2:
            save(nodes.GetNodesV2(), path)
        default:
            log.Panicf("invalid nodes version: %d", version)
        }
    }

    if path := nodes.config.Nodes.GraphPath; path != "" {
        save(nodes.BuildGraph(), path)
    }
    SaveJSON(nodes, nodes.config.Nodes.StatePath)
}

func save(input interface{}, outputFile string) {
// SaveJSON to path
func SaveJSON(input interface{}, outputFile string) {
    tmpFile := outputFile + ".tmp"

    f, err := os.OpenFile(tmpFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
@@ -1,4 +1,4 @@
package models
package runtime

import (
    "io/ioutil"

@@ -38,11 +38,11 @@ func TestExpire(t *testing.T) {

    // one offline?
    assert.NotNil(nodes.List["offline"])
    assert.False(nodes.List["offline"].Flags.Online)
    assert.False(nodes.List["offline"].Online)

    // one online?
    assert.NotNil(nodes.List["online"])
    assert.True(nodes.List["online"].Flags.Online)
    assert.True(nodes.List["online"].Online)
}

func TestLoadAndSave(t *testing.T) {

@@ -55,7 +55,7 @@ func TestLoadAndSave(t *testing.T) {
    nodes.load()

    tmpfile, _ := ioutil.TempFile("/tmp", "nodes")
    save(nodes, tmpfile.Name())
    SaveJSON(nodes, tmpfile.Name())
    os.Remove(tmpfile.Name())

    assert.Len(nodes.List, 1)
@@ -1,4 +1,4 @@
package models
package runtime

// CounterMap to manage multiple values
type CounterMap map[string]uint32

@@ -25,7 +25,7 @@ func NewGlobalStats(nodes *Nodes) (result *GlobalStats) {

    nodes.Lock()
    for _, node := range nodes.List {
        if node.Flags.Online {
        if node.Online {
            result.Nodes++
            if stats := node.Statistics; stats != nil {
                result.Clients += stats.Clients.Total

@@ -33,7 +33,7 @@ func NewGlobalStats(nodes *Nodes) (result *GlobalStats) {
                result.ClientsWifi5 += stats.Clients.Wifi5
                result.ClientsWifi += stats.Clients.Wifi
            }
            if node.Flags.Gateway {
            if node.Gateway {
                result.Gateways++
            }
            if info := node.Nodeinfo; info != nil {

@@ -54,15 +54,3 @@ func (m CounterMap) Increment(key string) {
        m[key] = val + 1
    }
}

// Fields returns fields for InfluxDB
func (stats *GlobalStats) Fields() map[string]interface{} {
    return map[string]interface{}{
        "nodes":          stats.Nodes,
        "gateways":       stats.Gateways,
        "clients.total":  stats.Clients,
        "clients.wifi":   stats.ClientsWifi,
        "clients.wifi24": stats.ClientsWifi24,
        "clients.wifi5":  stats.ClientsWifi5,
    }
}
@@ -1,4 +1,4 @@
package models
package runtime

import (
    "testing"

@@ -24,24 +24,6 @@ func TestGlobalStats(t *testing.T) {
    // check firmwares
    assert.Len(stats.Firmwares, 1)
    assert.EqualValues(1, stats.Firmwares["2016.1.6+entenhausen1"])

    fields := stats.Fields()

    // check fields
    assert.EqualValues(3, fields["nodes"])
}

func TestNodesV1(t *testing.T) {
    nodes := createTestNodes().GetNodesV1()

    assert := assert.New(t)
    assert.Len(nodes.List, 2)
}
func TestNodesV2(t *testing.T) {
    nodes := createTestNodes().GetNodesV2()

    assert := assert.New(t)
    assert.Len(nodes.List, 2)
}

func createTestNodes() *Nodes {