yanic/database/database.go

package database

import (
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/FreifunkBremen/respond-collector/models"
	"github.com/influxdata/influxdb/client/v2"
	imodels "github.com/influxdata/influxdb/models"
)

const (
	MeasurementNode     = "node"     // Measurement for per-node statistics
	MeasurementGlobal   = "global"   // Measurement for summarized global statistics
	MeasurementFirmware = "firmware" // Measurement for firmware statistics
	MeasurementModel    = "model"    // Measurement for model statistics

	batchMaxSize = 500             // flush a batch once it holds this many points
	batchTimeout = 5 * time.Second // flush a non-empty batch at least this often
)

// DB wraps the InfluxDB client together with the asynchronous
// batching and pruning workers.
type DB struct {
	config *models.Config
	client client.Client
	points chan *client.Point

	wg   sync.WaitGroup
	quit chan struct{}
}

// New connects to InfluxDB and starts the background workers.
// It panics if the client cannot be created.
func New(config *models.Config) *DB {
	// Make client
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr:     config.Influxdb.Address,
		Username: config.Influxdb.Username,
		Password: config.Influxdb.Password,
	})
	if err != nil {
		panic(err)
	}

	db := &DB{
		config: config,
		client: c,
		points: make(chan *client.Point, 1000),
		quit:   make(chan struct{}),
	}

	db.wg.Add(1)
	go db.addWorker()
	go db.deleteWorker()

	return db
}
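
// newExampleDB is a usage sketch, not part of the original file: it shows how
// a caller could wire up the database handle. It assumes the Influxdb fields
// of models.Config can be assigned directly (this file only reads them);
// address, credentials and database name are invented placeholders.
func newExampleDB() *DB {
	config := &models.Config{}
	config.Influxdb.Address = "http://localhost:8086" // placeholder
	config.Influxdb.Username = "yanic"                // placeholder
	config.Influxdb.Password = "secret"               // placeholder
	config.Influxdb.Database = "ffhb"                 // placeholder

	// New panics if the HTTP client cannot be created, so there is no error
	// to handle here; remember to call Close() later to flush pending points.
	return New(config)
}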

// DeletePoints drops per-node points that are older than the
// configured DeleteAfter duration.
func (db *DB) DeletePoints() {
	query := fmt.Sprintf("delete from %s where time < now() - %ds", MeasurementNode, db.config.Influxdb.DeleteAfter.Duration/time.Second)
	db.client.Query(client.NewQuery(query, db.config.Influxdb.Database, "m"))
}
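
// For illustration (not part of the original file): with DeleteAfter set to
// 168h, the statement built above reads
//
//	delete from node where time < now() - 604800s
//
// and is executed against db.config.Influxdb.Database with "m" precision.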

// AddPoint queues a single point for the batch writer.
// It panics if the point cannot be created.
func (db *DB) AddPoint(name string, tags imodels.Tags, fields imodels.Fields, time time.Time) {
	point, err := client.NewPoint(name, tags.Map(), fields, time)
	if err != nil {
		panic(err)
	}
	db.points <- point
}
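
// exampleAddGlobalPoint is a usage sketch, not part of the original file: it
// records one point in the "global" measurement. The field names and values
// are invented for illustration; real callers derive tags and fields from the
// collected node data (see Add and AddCounterMap below).
func exampleAddGlobalPoint(db *DB) {
	db.AddPoint(
		MeasurementGlobal,
		imodels.Tags{},
		imodels.Fields{"nodes": 312, "clients": 4242},
		time.Now(),
	)
}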

// AddCounterMap saves the values of a CounterMap in the database.
// Each key is used as the 'value' tag.
// Each value is used as the 'count' field.
func (db *DB) AddCounterMap(name string, m models.CounterMap) {
	now := time.Now()
	for key, count := range m {
		db.AddPoint(
			name,
			imodels.Tags{
				imodels.Tag{Key: []byte("value"), Value: []byte(key)},
			},
			imodels.Fields{"count": count},
			now,
		)
	}
}
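
// exampleAddFirmwareCounts is a usage sketch, not part of the original file:
// it feeds a map of firmware versions into the "firmware" measurement. It
// assumes models.CounterMap is a map from string to a numeric counter, as its
// use in AddCounterMap suggests; the versions and counts are invented.
func exampleAddFirmwareCounts(db *DB) {
	counts := models.CounterMap{
		"gluon-v2016.2.7": 23,
		"gluon-v2016.2.6": 5,
	}
	// Each key becomes a point tagged value=<key> with the field count=<count>.
	db.AddCounterMap(MeasurementFirmware, counts)
}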

// Add stores the data of a single node.
func (db *DB) Add(nodeID string, node *models.Node) {
	tags, fields := node.ToInflux()
	db.AddPoint(MeasurementNode, tags, fields, time.Now())
}

// Close stops the workers, flushes all pending points and
// closes the connection to InfluxDB.
func (db *DB) Close() {
	close(db.quit)
	close(db.points)
	db.wg.Wait()
	db.client.Close()
}

// deleteWorker prunes node-specific data periodically.
func (db *DB) deleteWorker() {
	ticker := time.NewTicker(db.config.Influxdb.DeleteInterval.Duration)
	for {
		select {
		case <-ticker.C:
			db.DeletePoints()
		case <-db.quit:
			ticker.Stop()
			return
		}
	}
}

// addWorker stores data points in batches into the influxdb.
// A batch is written once it reaches batchMaxSize points, after
// batchTimeout has elapsed, or when the points channel is closed.
func (db *DB) addWorker() {
	bpConfig := client.BatchPointsConfig{
		Database:  db.config.Influxdb.Database,
		Precision: "m",
	}

	var bp client.BatchPoints
	var err error
	var writeNow, closed bool
	timer := time.NewTimer(batchTimeout)

	for !closed {
		// wait for new points
		select {
		case point, ok := <-db.points:
			if ok {
				if bp == nil {
					// create new batch
					timer.Reset(batchTimeout)
					if bp, err = client.NewBatchPoints(bpConfig); err != nil {
						log.Fatal(err)
					}
				}
				bp.AddPoint(point)
			} else {
				closed = true
			}
		case <-timer.C:
			if bp == nil {
				timer.Reset(batchTimeout)
			} else {
				writeNow = true
			}
		}

		// write batch now?
		if bp != nil && (writeNow || closed || len(bp.Points()) >= batchMaxSize) {
			log.Println("saving", len(bp.Points()), "points")
			if err = db.client.Write(bp); err != nil {
				log.Fatal(err)
			}
			writeNow = false
			bp = nil
		}
	}
	timer.Stop()
	db.wg.Done()
}
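
// Putting it together (a sketch, not part of the original file): callers queue
// points with Add/AddCounterMap, addWorker flushes them either after
// batchTimeout or once batchMaxSize points are pending, deleteWorker prunes
// old node data, and Close drains the queue before shutdown. The nodes and
// modelCounts values below are hypothetical.
//
//	db := New(config)
//	defer db.Close()
//
//	for nodeID, node := range nodes {
//		db.Add(nodeID, node)
//	}
//	db.AddCounterMap(MeasurementModel, modelCounts)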