diff --git a/.github/workflows/go.yml b/.github/workflows/build-exporter-go.yml similarity index 100% rename from .github/workflows/go.yml rename to .github/workflows/build-exporter-go.yml diff --git a/.github/workflows/build-metadata-go.yml b/.github/workflows/build-metadata-go.yml new file mode 100644 index 0000000..8e73b77 --- /dev/null +++ b/.github/workflows/build-metadata-go.yml @@ -0,0 +1,44 @@ +# This workflow will build a golang project +# For more information see: https://docs.github.com/en/actions/automating-builds-and-tests/building-and-testing-go + +name: Go + +on: + push: + branches: ["main"] + paths-ignore: + - README.md + - LICENSE + - .github/workflows/container.yml + - Dockerfile + pull_request: + branches: ["main"] + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + GOOS: [linux, windows, darwin] + GOARCH: [amd64, arm64] + steps: + - name: Checkout GitHub Action + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: 1.21 + + - name: Build for ${{ matrix.GOOS }}-${{ matrix.GOARCH }} + working-directory: ./satisfactory-metadata + env: + GOOS: ${{ matrix.GOOS }} + GOARCH: ${{ matrix.GOARCH }} + run: go build -o satisfactory-metadata -ldflags "-s -w" main.go + + - name: Upload artifact + uses: actions/upload-artifact@v3 + with: + name: satisfactory-metadata-${{ matrix.GOOS }}-${{ matrix.GOARCH }} + path: ./satisfactory-metadata/satisfactory-metadata diff --git a/.github/workflows/build-metadata-images.yml b/.github/workflows/build-metadata-images.yml new file mode 100644 index 0000000..f3190af --- /dev/null +++ b/.github/workflows/build-metadata-images.yml @@ -0,0 +1,39 @@ +name: Deploy Images to GHCR + +env: + IMAGE_NAME: ghcr.io/justereseau/satisfactory-metrics + +on: + push: + branches: ["main"] + paths-ignore: + - README.md + - LICENSE + - .github/workflows/go.yml + pull_request: + branches: ["main"] + +jobs: + push-store-image: + runs-on: ubuntu-latest + steps: + - name: Checkout GitHub 
Action + uses: actions/checkout@v4 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Build and push + uses: docker/build-push-action@v5 + with: + context: ./satisfactory-metadata + platforms: linux/amd64 + push: true + tags: "${{ env.IMAGE_NAME }}:latest,${{ env.IMAGE_NAME }}:${{ github.sha }}" diff --git a/satisfactory-metadata/README.md b/satisfactory-metadata/README.md new file mode 100644 index 0000000..1bd6b9f --- /dev/null +++ b/satisfactory-metadata/README.md @@ -0,0 +1,12 @@ +# A helper that syncs Satisfactory metadata to a Postgres database + +This is a helper that syncs Satisfactory metadata to a Postgres database, in order to be able to query it from Grafana. + +This is a helper for Satisfactory that uses the [Ficsit Remote Monitoring mod](https://ficsit.app/mod/B9bEiZFtaaQZHU) and writes the data to a Postgres database. + +This work is a fork of the work of [Jeff Wong](https://github.com/featheredtoast/) on the [Ficsit Remote Monitoring Companion Bundle](https://github.com/featheredtoast/satisfactory-monitoring) that I have adapted to my use case. + +Basically, I have kept the part that writes to the database, and the dashboards. +I have edited the database sync part to take advantage of Kubernetes jobs. + +I have also added a wrapper for building this as a Docker image. 
diff --git a/satisfactory-metadata/cache_worker.go b/satisfactory-metadata/cache_worker.go
new file mode 100644
index 0000000..bd622c7
--- /dev/null
+++ b/satisfactory-metadata/cache_worker.go
@@ -0,0 +1,218 @@
+package main
+
+import (
+    "context"
+    "database/sql"
+    "encoding/json"
+    "fmt"
+    "net/http"
+    "time"
+
+    "github.com/benbjohnson/clock"
+)
+
+// Clock is the package-wide clock; tests can substitute a mock.
+var Clock = clock.New()
+
+// CacheWorker periodically pulls metrics from the Ficsit Remote Monitoring
+// API and caches them in a Postgres database.
+type CacheWorker struct {
+    ctx        context.Context
+    cancel     context.CancelFunc
+    frmBaseUrl string
+    db         *sql.DB
+    now        time.Time
+}
+
+// NewCacheWorker builds a worker that reads from the FRM API at frmBaseUrl
+// and writes to db. Call Start to begin polling and Stop to shut it down.
+func NewCacheWorker(frmBaseUrl string, db *sql.DB) *CacheWorker {
+    ctx, cancel := context.WithCancel(context.Background())
+
+    return &CacheWorker{
+        frmBaseUrl: frmBaseUrl,
+        db:         db,
+        ctx:        ctx,
+        cancel:     cancel,
+    }
+}
+
+// retrieveData fetches frmAddress and returns each element of the top-level
+// JSON array as its raw JSON text.
+func retrieveData(frmAddress string) ([]string, error) {
+    resp, err := http.Get(frmAddress)
+    if err != nil {
+        // BUG FIX: this used to say "error when parsing json" for a fetch failure.
+        return nil, fmt.Errorf("error when fetching %s: %s", frmAddress, err)
+    }
+    defer resp.Body.Close()
+
+    var content []json.RawMessage
+    err = json.NewDecoder(resp.Body).Decode(&content)
+    if err != nil {
+        return nil, fmt.Errorf("error when parsing json: %s", err)
+    }
+    result := []string{}
+    for _, c := range content {
+        result = append(result, string(c))
+    }
+    return result, nil
+}
+
+// cacheMetrics replaces the cached rows for metric with data inside a
+// single transaction.
+func (c *CacheWorker) cacheMetrics(metric string, data []string) (err error) {
+    tx, err := c.db.Begin()
+    if err != nil {
+        return
+    }
+    // Commit on success, roll back if any statement below failed.
+    defer func() {
+        switch err {
+        case nil:
+            err = tx.Commit()
+        default:
+            tx.Rollback()
+        }
+    }()
+
+    delete := `delete from cache where metric = $1;`
+    _, err = tx.Exec(delete, metric)
+    if err != nil {
+        return
+    }
+    for _, s := range data {
+        insert := `insert into cache (metric,data) values($1,$2)`
+        _, err = tx.Exec(insert, metric, s)
+        if err != nil {
+            return
+        }
+    }
+    return
+}
+
+// cacheMetricsWithHistory appends data to the history table stamped with
+// c.now, then prunes old rows so roughly one hour of samples is retained.
+func (c *CacheWorker) cacheMetricsWithHistory(metric string, data []string) (err error) {
+    tx, err := c.db.Begin()
+    if err != nil {
+        return
+    }
+    defer func() {
+        switch err {
+        case nil:
+            err = tx.Commit()
+        default:
+            tx.Rollback()
+        }
+    }()
+    for _, s := range data {
+        insert := `insert into cache_with_history (metric,data, time) values($1,$2,$3)`
+        _, err = tx.Exec(insert, metric, s, c.now)
+        if err != nil {
+            return
+        }
+    }
+
+    //720 = 1 hour, 5 second increments. retain that many rows for every data.
+    keep := 720 * len(data)
+
+    delete := `delete from cache_with_history where
+metric = $1 and
+id NOT IN (
+select id from "cache_with_history" where metric = $1
+order by id desc
+limit $2
+);`
+    _, err = tx.Exec(delete, metric, keep)
+    return
+}
+
+// flushMetricHistory empties the metric history table.
+func (c *CacheWorker) flushMetricHistory() error {
+    delete := `truncate cache_with_history;`
+    _, err := c.db.Exec(delete)
+    if err != nil {
+        fmt.Println("flush metrics history db error: ", err)
+    }
+    return err
+}
+
+// pullMetrics fetches one FRM route and stores it in the cache, and
+// optionally in the history table as well.
+func (c *CacheWorker) pullMetrics(metric string, route string, keepHistory bool) error {
+    data, err := retrieveData(c.frmBaseUrl + route)
+    if err != nil {
+        return fmt.Errorf("error when retrieving data: %s", err)
+    }
+    // BUG FIX: the error returned by cacheMetrics was previously discarded,
+    // making the check below always pass.
+    err = c.cacheMetrics(metric, data)
+    if err != nil {
+        return fmt.Errorf("error when caching metrics %s", err)
+    }
+    if keepHistory {
+        err = c.cacheMetricsWithHistory(metric, data)
+        if err != nil {
+            return fmt.Errorf("error when caching metrics history %s", err)
+        }
+    }
+    return nil
+}
+
+// pullMetricsLog is pullMetrics plus logging of any failure.
+func (c *CacheWorker) pullMetricsLog(metric string, route string, keepHistory bool) error {
+    if err := c.pullMetrics(metric, route, keepHistory); err != nil {
+        fmt.Println("Error when pulling metrics ", metric, ": ", err)
+        return err
+    }
+    return nil
+}
+
+// pullLowCadenceMetrics refreshes the slow-moving datasets (roughly once a minute).
+func (c *CacheWorker) pullLowCadenceMetrics() {
+    c.pullMetricsLog("factory", "/getFactory", true)
+    c.pullMetricsLog("extractor", "/getExtractor", true)
+    c.pullMetricsLog("dropPod", "/getDropPod", false)
+    c.pullMetricsLog("storageInv", "/getStorageInv", false)
+    c.pullMetricsLog("worldInv", "/getWorldInv", false)
+    c.pullMetricsLog("droneStation", "/getDroneStation", false)
+}
+
+// pullRealtimeMetrics refreshes the fast-moving datasets (every tick).
+func (c *CacheWorker) pullRealtimeMetrics() {
+    c.pullMetricsLog("drone", "/getDrone", true)
+    c.pullMetricsLog("train", "/getTrains", true)
+    c.pullMetricsLog("truck", "/getVehicles", true)
+    c.pullMetricsLog("trainStation", "/getTrainStation", true)
+    c.pullMetricsLog("truckStation", "/getTruckStation", true)
+}
+
+// Start polls FRM every 5 seconds until Stop is called; low-cadence metrics
+// are additionally refreshed every 12th tick (~1 minute).
+func (c *CacheWorker) Start() {
+    // Capture a single timestamp per tick so all metrics written in that tick
+    // share the same time and can be aggregated together.
+    c.now = Clock.Now()
+    c.flushMetricHistory()
+    c.pullLowCadenceMetrics()
+    c.pullRealtimeMetrics()
+    counter := 0
+    for {
+        select {
+        case <-c.ctx.Done():
+            return
+        case <-Clock.After(5 * time.Second):
+            c.now = Clock.Now()
+            counter = counter + 1
+            c.pullRealtimeMetrics()
+            if counter > 11 {
+                c.pullLowCadenceMetrics()
+                counter = 0
+            }
+        }
+    }
+}
+
+// Stop cancels the worker's context, ending Start's loop.
+func (c *CacheWorker) Stop() {
+    c.cancel()
+}
diff --git a/satisfactory-metadata/go.mod b/satisfactory-metadata/go.mod
new file mode 100644
index 0000000..501cf9d
--- /dev/null
+++ b/satisfactory-metadata/go.mod
@@ -0,0 +1,8 @@
+module github.com/justereseau/satisfactory-metrics/satisfactory-metadata
+
+go 1.21.6
+
+require (
+	github.com/benbjohnson/clock v1.3.5
+	github.com/lib/pq v1.10.9
+)
diff --git a/satisfactory-metadata/go.sum b/satisfactory-metadata/go.sum
new file mode 100644
index 0000000..facdc1b
--- /dev/null
+++ b/satisfactory-metadata/go.sum
@@ -0,0 +1,4 @@
+github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
+github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
diff --git a/satisfactory-metadata/main.go b/satisfactory-metadata/main.go
new file mode 100644
index 0000000..5d85109
--- /dev/null
+++ b/satisfactory-metadata/main.go
@@ -0,0 +1,61 @@
+package main
+
+import (
+    "database/sql"
+    "flag"
+    "fmt"
+
+    _ "github.com/lib/pq"
+)
+
+// Define parameters
+var (
+    logLevel      = flag.String("log.level", "info", "Only log messages with the given severity or above. One of: [debug, info, warn, error, none]")
+    frmApiAddress = flag.String("frm.listen-address", "http://localhost:8080", "Address of Ficsit Remote Monitoring webserver")
+
+    pgHost     = flag.String("db.pghost", "postgres", "postgres hostname")
+    pgPort     = flag.Int("db.pgport", 5432, "postgres port")
+    pgPassword = flag.String("db.pgpassword", "secretpassword", "postgres password")
+    pgUser     = flag.String("db.pguser", "postgres", "postgres username")
+    pgDb       = flag.String("db.pgdb", "postgres", "postgres db")
+)
+
+// main connects to Postgres using the db.* flags and verifies the
+// connection with a ping.
+func main() {
+    // BUG FIX: flags were never parsed, so command-line values were ignored.
+    flag.Parse()
+
+    // BUG FIX: the flag pointers must be dereferenced; formatting a *string
+    // with %s prints %!s(*string=0x...) rather than the flag's value.
+    psqlconn := fmt.Sprintf("host=%s port=%d user=%s password=%s dbname=%s sslmode=disable", *pgHost, *pgPort, *pgUser, *pgPassword, *pgDb)
+    db, err := sql.Open("postgres", psqlconn)
+    if err != nil {
+        panic("Failed to connect to database: " + err.Error())
+    }
+    defer db.Close()
+
+    err = db.Ping()
+    if err != nil {
+        panic("Failed to ping database: " + err.Error())
+    }
+
+    // cacheWorker := NewCacheWorker("http://"+frmHostname+":"+strconv.Itoa(frmPort), db)
+    // go cacheWorker.Start()
+
+    // fmt.Printf(`
+    // FRM Cache started
+    // Press ctrl-c to exit`)
+
+    // // Wait for an interrupt signal
+    // sigChan := make(chan os.Signal, 1)
+    // if runtime.GOOS == "windows" {
+    // 	signal.Notify(sigChan, os.Interrupt)
+    // } else {
+    // 	signal.Notify(sigChan, syscall.SIGTERM)
+    // 	signal.Notify(sigChan, syscall.SIGINT)
+    // }
+    // <-sigChan
+
+    // cacheWorker.Stop()
+}