package influxdb

import (
	"sync"
	"time"

	"github.com/bdlm/log"
	"github.com/influxdata/influxdb1-client/models"
	client "github.com/influxdata/influxdb1-client/v2"

	"github.com/FreifunkBremen/yanic/database"
)

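// Names of the measurements written by this adapter, plus the limits that
// control when a batch of points is flushed.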
const (
	MeasurementLink               = "link"        // Measurement for per-link statistics
	MeasurementNode               = "node"        // Measurement for per-node statistics
	MeasurementDHCP               = "dhcp"        // Measurement for DHCP server statistics
	MeasurementGlobal             = "global"      // Measurement for summarized global statistics
	CounterMeasurementFirmware    = "firmware"    // Measurement for firmware statistics
	CounterMeasurementModel       = "model"       // Measurement for model statistics
	CounterMeasurementAutoupdater = "autoupdater" // Measurement for autoupdater
	batchMaxSize                  = 1000
	batchTimeout                  = 5 * time.Second
)

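// Connection wraps the InfluxDB client together with the adapter
// configuration and the buffered channel that feeds the batching worker.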
type Connection struct {
	database.Connection
	config Config
	client client.Client
	points chan *client.Point
	wg     sync.WaitGroup
}

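// Config is the raw configuration map of this adapter. The accessors below
// expect the keys "address", "database", "username" and "password";
// "insecure_skip_verify" and "tags" are optional.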
type Config map[string]interface{}

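// Address returns the configured InfluxDB server address.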
func (c Config) Address() string {
	return c["address"].(string)
}

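// Database returns the name of the InfluxDB database to write into.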
func (c Config) Database() string {
	return c["database"].(string)
}

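// Username returns the username used for authentication.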
func (c Config) Username() string {
	return c["username"].(string)
}

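// Password returns the password used for authentication.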
func (c Config) Password() string {
	return c["password"].(string)
}

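// InsecureSkipVerify reports whether TLS certificate verification should be
// skipped; it defaults to false when the key is not set.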
func (c Config) InsecureSkipVerify() bool {
	if d, ok := c["insecure_skip_verify"]; ok {
		return d.(bool)
	}
	return false
}

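// Tags returns the optional default tags that are attached to every point,
// or nil if none are configured.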
func (c Config) Tags() map[string]interface{} {
	if c["tags"] != nil {
		return c["tags"].(map[string]interface{})
	}
	return nil
}

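// Register this adapter under the name "influxdb" so it can be selected in
// the database configuration.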
func init() {
	database.RegisterAdapter("influxdb", Connect)
}

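// Connect builds the HTTP client from the configuration, pings the server
// once to check that it is reachable and starts the batching worker.
//
// A minimal sketch of how the adapter could be fed, with purely hypothetical
// values (the map normally comes from the parsed configuration file, not
// from hand-written code):
//
//	conn, err := Connect(map[string]interface{}{
//		"address":  "http://localhost:8086", // hypothetical server address
//		"database": "yanic",                 // hypothetical database name
//		"username": "",
//		"password": "",
//	})
//	if err != nil {
//		// server not reachable within 50ms, or the client could not be built
//	}
//	_ = conn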
func Connect(configuration map[string]interface{}) (database.Connection, error) {
	config := Config(configuration)

	// Make client
	c, err := client.NewHTTPClient(client.HTTPConfig{
		Addr:               config.Address(),
		Username:           config.Username(),
		Password:           config.Password(),
		InsecureSkipVerify: config.InsecureSkipVerify(),
	})
	if err != nil {
		return nil, err
	}

	_, _, err = c.Ping(time.Millisecond * 50)
	if err != nil {
		return nil, err
	}

	db := &Connection{
		config: config,
		client: c,
		points: make(chan *client.Point, batchMaxSize),
	}

	db.wg.Add(1)
	go db.addWorker()

	return db, nil
}

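// addPoint adds the tags from the configuration to the point (without
// overriding tags that are already set) and queues it for the batch worker.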
func (conn *Connection) addPoint(name string, tags models.Tags, fields models.Fields, t ...time.Time) {
	if configTags := conn.config.Tags(); configTags != nil {
		for tag, valueInterface := range configTags {
			if value, ok := valueInterface.(string); ok && tags.Get([]byte(tag)) == nil {
				tags.SetString(tag, value)
			} else {
				log.WithFields(map[string]interface{}{
					"name": name,
					"tag":  tag,
				}).Warnf("could not save tag configuration on point")
			}
		}
	}
	point, err := client.NewPoint(name, tags.Map(), fields, t...)
	if err != nil {
		log.Panicf("could not save points: %s", err)
	}
	conn.points <- point
}

// Close closes the connection and cleans up: the point channel is closed,
// the worker flushes what is left, and the client is shut down.
func (conn *Connection) Close() {
	close(conn.points)
	conn.wg.Wait()
	conn.client.Close()
}

// addWorker stores data points in batches into the InfluxDB. A batch is
// written once it reaches batchMaxSize points, when batchTimeout expires or
// when the point channel is closed.
func (conn *Connection) addWorker() {
	bpConfig := client.BatchPointsConfig{
		Database:  conn.config.Database(),
		Precision: "m",
	}

	var bp client.BatchPoints
	var err error
	var writeNow, closed bool
	timer := time.NewTimer(batchTimeout)

	for !closed {
		// wait for new points
		select {
		case point, ok := <-conn.points:
			if ok {
				if bp == nil {
					// create new batch
					timer.Reset(batchTimeout)
					if bp, err = client.NewBatchPoints(bpConfig); err != nil {
						log.Fatal(err)
					}
				}
				bp.AddPoint(point)
			} else {
				closed = true
			}
		case <-timer.C:
			if bp == nil {
				timer.Reset(batchTimeout)
			} else {
				writeNow = true
			}
		}

		// write batch now?
		if bp != nil && (writeNow || closed || len(bp.Points()) >= batchMaxSize) {
			log.WithField("count", len(bp.Points())).Info("saving points")

			if err = conn.client.Write(bp); err != nil {
				log.Error(err)
			}
			writeNow = false
			bp = nil
		}
	}
	timer.Stop()
	conn.wg.Done()
}