-
Notifications
You must be signed in to change notification settings - Fork 4
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Loading status checks…
Observability (#22)
* observability platform initial work
1 parent
fd641af
commit fd9790b
Showing
34 changed files
with
2,035 additions
and
52 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,142 @@ | ||
package main | ||
|
||
import ( | ||
"bytes" | ||
"context" | ||
"encoding/json" | ||
"flag" | ||
"fmt" | ||
"log" | ||
"net/http" | ||
"os" | ||
"strings" | ||
|
||
"github.com/aws/aws-sdk-go-v2/aws" | ||
"github.com/aws/aws-sdk-go-v2/config" | ||
"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs" | ||
"github.com/aws/aws-sdk-go-v2/service/cloudwatchlogs/types" | ||
) | ||
|
||
func main() { | ||
var ( | ||
logGroupName string | ||
) | ||
flag.StringVar(&logGroupName, "log-group", "", "log group to ingest") | ||
flag.Parse() | ||
|
||
if logGroupName == "" { | ||
flag.PrintDefaults() | ||
os.Exit(1) | ||
} | ||
|
||
cfg, err := config.LoadDefaultConfig(context.TODO()) | ||
if err != nil { | ||
log.Fatalf("unable to load SDK config, %v", err) | ||
} | ||
|
||
svc := cloudwatchlogs.NewFromConfig(cfg) | ||
|
||
logStreams, err := fetchLogStreams(svc, logGroupName) | ||
if err != nil { | ||
log.Fatalf("failed to fetch log streams: %v", err) | ||
} | ||
|
||
for _, stream := range logStreams { | ||
err := fetchLogEvents(svc, logGroupName, *stream.LogStreamName) | ||
if err != nil { | ||
log.Printf("failed to fetch log events for stream %s: %v", *stream.LogStreamName, err) | ||
return | ||
} | ||
} | ||
} | ||
|
||
func fetchLogStreams(svc *cloudwatchlogs.Client, logGroupName string) ([]types.LogStream, error) { | ||
var allStreams []types.LogStream | ||
nextToken := "" | ||
|
||
for { | ||
input := &cloudwatchlogs.DescribeLogStreamsInput{ | ||
LogGroupName: aws.String(logGroupName), | ||
} | ||
|
||
if nextToken != "" { | ||
input.NextToken = aws.String(nextToken) | ||
} | ||
|
||
result, err := svc.DescribeLogStreams(context.TODO(), input) | ||
if err != nil { | ||
return nil, err | ||
} | ||
|
||
allStreams = append(allStreams, result.LogStreams...) | ||
|
||
if result.NextToken == nil { | ||
break | ||
} | ||
|
||
nextToken = *result.NextToken | ||
} | ||
|
||
return allStreams, nil | ||
} | ||
|
||
func fetchLogEvents(svc *cloudwatchlogs.Client, logGroupName, logStreamName string) error { | ||
nextToken := "" | ||
messages := []map[string]any{} | ||
logStreamNameSplit := strings.Split(logStreamName, "/") | ||
logStreamWithoutRandom := strings.Join(logStreamNameSplit[:len(logStreamNameSplit)-1], "/") | ||
|
||
for { | ||
input := &cloudwatchlogs.GetLogEventsInput{ | ||
LogGroupName: aws.String(logGroupName), | ||
LogStreamName: aws.String(logStreamName), | ||
StartFromHead: aws.Bool(true), | ||
} | ||
|
||
if nextToken != "" { | ||
input.NextToken = aws.String(nextToken) | ||
} | ||
|
||
result, err := svc.GetLogEvents(context.TODO(), input) | ||
if err != nil { | ||
return err | ||
} | ||
|
||
for _, event := range result.Events { | ||
seconds := float64(*event.Timestamp / 1000) | ||
microseconds := float64(*event.Timestamp%1000) * 1000 | ||
messages = append(messages, map[string]any{ | ||
"date": seconds + (microseconds / 1e6), | ||
"log": *event.Message, | ||
"log-group": logGroupName, | ||
"log-stream": logStreamWithoutRandom, | ||
}) | ||
} | ||
|
||
if result.NextForwardToken == nil || nextToken == *result.NextForwardToken { | ||
break | ||
} | ||
|
||
nextToken = *result.NextForwardToken | ||
} | ||
|
||
if len(messages) == 0 { | ||
return nil | ||
} | ||
|
||
out, err := json.Marshal(messages) | ||
if err != nil { | ||
return err | ||
} | ||
resp, err := http.Post("http://localhost/api/observability/ingestion/json", "image/jpeg", bytes.NewBuffer(out)) | ||
if err != nil { | ||
return err | ||
} | ||
if resp.StatusCode != 200 { | ||
return fmt.Errorf("response code is not 200") | ||
} | ||
|
||
fmt.Printf("Ingested log-group %s, stream %s: %d messages\n", logGroupName, logStreamWithoutRandom, len(messages)) | ||
|
||
return nil | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,178 @@ | ||
package observability | ||
|
||
import ( | ||
"bytes" | ||
"fmt" | ||
"io" | ||
"path" | ||
"strconv" | ||
"strings" | ||
"time" | ||
|
||
"github.com/in4it/wireguard-server/pkg/logging" | ||
"github.com/in4it/wireguard-server/pkg/storage" | ||
) | ||
|
||
// WriteBufferToStorage drains n bytes from the ingest buffer and persists
// them to storage, writing one file per (merged) date prefix. Concurrent
// writers are serialized via WriteLock; ActiveBufferWriters lets Flush wait
// for in-flight writes to finish.
func (o *Observability) WriteBufferToStorage(n int64) error {
	o.ActiveBufferWriters.Add(1)
	defer o.ActiveBufferWriters.Done()
	o.WriteLock.Lock()
	defer o.WriteLock.Unlock()
	logging.DebugLog(fmt.Errorf("writing buffer to file. Buffer has: %d bytes", n))
	// copy first to temporary buffer (storage might have latency)
	tempBuf := bytes.NewBuffer(make([]byte, 0, n))
	_, err := io.CopyN(tempBuf, o.Buffer, n)
	if err != nil && err != io.EOF {
		return fmt.Errorf("write error from buffer to temporary buffer: %s", err)
	}
	// ReadPrefix consumes the bookkeeping entries covering exactly n bytes,
	// telling us which byte ranges belong to which date prefix.
	prefix := o.Buffer.ReadPrefix(n)
	o.LastFlushed = time.Now()

	for _, bufferPosAndPrefix := range mergeBufferPosAndPrefix(prefix) {
		now := time.Now()
		// Unique filename per flush: unix timestamp plus a monotonically
		// increasing sequence number.
		filename := bufferPosAndPrefix.prefix + "/data-" + strconv.FormatInt(now.Unix(), 10) + "-" + strconv.FormatUint(o.FlushOverflowSequence.Add(1), 10)
		err = ensurePath(o.Storage, filename)
		if err != nil {
			return fmt.Errorf("ensure path error: %s", err)
		}
		file, err := o.Storage.OpenFileForWriting(filename)
		if err != nil {
			return fmt.Errorf("open file for writing error: %s", err)
		}
		// copy this prefix's slice of the drained bytes into its own file
		_, err = io.CopyN(file, tempBuf, int64(bufferPosAndPrefix.offset))
		if err != nil && err != io.EOF {
			return fmt.Errorf("file write error: %s", err)
		}
		logging.DebugLog(fmt.Errorf("wrote file: %s", filename))
		err = file.Close()
		if err != nil {
			return fmt.Errorf("file close error: %s", err)
		}
	}
	return nil
}
|
||
func (o *Observability) monitorBuffer() { | ||
for { | ||
time.Sleep(FLUSH_TIME_MAX_MINUTES * time.Minute) | ||
if time.Since(o.LastFlushed) >= (FLUSH_TIME_MAX_MINUTES*time.Minute) && o.Buffer.Len() > 0 { | ||
if o.FlushOverflow.CompareAndSwap(false, true) { | ||
err := o.WriteBufferToStorage(int64(o.Buffer.Len())) | ||
o.FlushOverflow.Swap(true) | ||
if err != nil { | ||
logging.ErrorLog(fmt.Errorf("write log buffer to storage error: %s", err)) | ||
continue | ||
} | ||
} | ||
o.LastFlushed = time.Now() | ||
} | ||
} | ||
} | ||
|
||
// Ingest decodes a JSON batch of fluent-bit messages from data and appends
// the binary-encoded result to the in-memory buffer. When the buffer grows
// past MaxBufferSize, a single background flush to storage is started,
// guarded by the FlushOverflow flag so only one flush goroutine runs at a
// time. Always closes data.
func (o *Observability) Ingest(data io.ReadCloser) error {
	defer data.Close()
	msgs, err := Decode(data)
	if err != nil {
		return fmt.Errorf("decode error: %s", err)
	}
	logging.DebugLog(fmt.Errorf("messages ingested: %d", len(msgs)))
	if len(msgs) == 0 {
		return nil // no messages to ingest
	}
	// The whole batch is filed under the date prefix of its first message —
	// assumes a batch doesn't straddle a day boundary (TODO confirm).
	_, err = o.Buffer.Write(encodeMessage(msgs), FloatToDate(msgs[0].Date).Format(DATE_PREFIX))
	if err != nil {
		return fmt.Errorf("write error: %s", err)
	}
	if o.Buffer.Len() >= o.MaxBufferSize {
		// only one overflow-flush goroutine at a time
		if o.FlushOverflow.CompareAndSwap(false, true) {
			go func() { // write to storage
				// re-check: another writer may have flushed meanwhile
				if n := o.Buffer.Len(); n >= o.MaxBufferSize {
					err := o.WriteBufferToStorage(int64(n))
					if err != nil {
						logging.ErrorLog(fmt.Errorf("write log buffer to storage error (buffer: %d): %s", o.Buffer.Len(), err))
					}
				}
				o.FlushOverflow.Swap(false)
			}()
		}
	}
	return nil
}
|
||
func (o *Observability) Flush() error { | ||
// wait until all data is flushed | ||
o.ActiveBufferWriters.Wait() | ||
|
||
// flush remaining data that hasn't been flushed | ||
if n := o.Buffer.Len(); n >= 0 { | ||
err := o.WriteBufferToStorage(int64(n)) | ||
if err != nil { | ||
return fmt.Errorf("write log buffer to storage error (buffer: %d): %s", o.Buffer.Len(), err) | ||
} | ||
} | ||
return nil | ||
} | ||
|
||
func (c *ConcurrentRWBuffer) Write(p []byte, prefix string) (n int, err error) { | ||
c.mu.Lock() | ||
defer c.mu.Unlock() | ||
c.prefix = append(c.prefix, BufferPosAndPrefix{prefix: prefix, offset: len(p)}) | ||
return c.buffer.Write(p) | ||
} | ||
// Read drains up to len(p) bytes from the underlying buffer while holding
// the buffer lock.
func (c *ConcurrentRWBuffer) Read(p []byte) (n int, err error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.buffer.Read(p)
}
// ReadPrefix pops and returns the leading bookkeeping entries whose offsets
// sum to exactly n — the number of bytes the caller just drained from the
// buffer. Returns nil if no prefix of the entries sums to exactly n; callers
// are expected to drain in the same units that were written.
func (c *ConcurrentRWBuffer) ReadPrefix(n int64) []BufferPosAndPrefix {
	c.mu.Lock()
	defer c.mu.Unlock()
	totalOffset := 0
	for k, v := range c.prefix {
		if int64(totalOffset+v.offset) == n {
			part1 := c.prefix[:k+1]
			// copy the retained tail so it doesn't alias the same backing
			// array as the returned head slice
			part2 := make([]BufferPosAndPrefix, len(c.prefix[k+1:]))
			copy(part2, c.prefix[k+1:])
			c.prefix = part2
			return part1
		}
		totalOffset += v.offset
	}
	return nil
}
func (c *ConcurrentRWBuffer) Len() int { | ||
return c.buffer.Len() | ||
} | ||
func (c *ConcurrentRWBuffer) Cap() int { | ||
return c.buffer.Cap() | ||
} | ||
|
||
func ensurePath(storage storage.Iface, filename string) error { | ||
base := path.Dir(filename) | ||
baseSplit := strings.Split(base, "/") | ||
fullPath := "" | ||
for _, v := range baseSplit { | ||
fullPath = path.Join(fullPath, v) | ||
err := storage.EnsurePath(fullPath) | ||
if err != nil { | ||
return err | ||
} | ||
} | ||
return nil | ||
} | ||
|
||
func mergeBufferPosAndPrefix(a []BufferPosAndPrefix) []BufferPosAndPrefix { | ||
bufferPosAndPrefix := []BufferPosAndPrefix{} | ||
for i := 0; i < len(a); i++ { | ||
offset := a[i].offset | ||
for y := i; y+1 < len(a) && a[y].prefix == a[y+1].prefix; y++ { | ||
offset += a[y+1].offset | ||
i++ | ||
} | ||
bufferPosAndPrefix = append(bufferPosAndPrefix, BufferPosAndPrefix{ | ||
prefix: a[i].prefix, | ||
offset: offset, | ||
}) | ||
} | ||
return bufferPosAndPrefix | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,307 @@ | ||
package observability | ||
|
||
import ( | ||
"bytes" | ||
"encoding/json" | ||
"fmt" | ||
"io" | ||
"slices" | ||
"strconv" | ||
"testing" | ||
|
||
"github.com/in4it/wireguard-server/pkg/logging" | ||
memorystorage "github.com/in4it/wireguard-server/pkg/storage/memory" | ||
) | ||
|
||
// TestIngestion pushes 20 messages through Ingest in 10 batches of 2, with a
// deliberately tiny max buffer (20 bytes) so intermediate flushes happen,
// then Flushes and verifies every message can be decoded back from storage.
func TestIngestion(t *testing.T) {
	logging.Loglevel = logging.LOG_DEBUG
	totalMessagesToGenerate := 20
	storage := &memorystorage.MockMemoryStorage{}
	o := NewWithoutMonitor(storage, 20)
	o.Storage = storage
	payloads := IncomingData{}
	// totalMessagesToGenerate/10 = 2 messages per batch
	for i := 0; i < totalMessagesToGenerate/10; i++ {
		payloads = append(payloads, map[string]any{
			"date": 1720613813.197045,
			"log":  "this is string: " + strconv.Itoa(i),
		})
	}

	// send 10 batches of the 2-message payload
	for i := 0; i < totalMessagesToGenerate/len(payloads); i++ {
		payloadBytes, err := json.Marshal(payloads)
		if err != nil {
			t.Fatalf("marshal error: %s", err)
		}
		data := io.NopCloser(bytes.NewReader(payloadBytes))
		err = o.Ingest(data)
		if err != nil {
			t.Fatalf("ingest error: %s", err)
		}
	}

	err := o.Flush()
	if err != nil {
		t.Fatalf("flush error: %s", err)
	}

	dirlist, err := storage.ReadDir("")
	if err != nil {
		t.Fatalf("read dir error: %s", err)
	}

	// count every message persisted across all flushed files
	totalMessages := 0
	for _, file := range dirlist {
		messages, err := storage.ReadFile(file)
		if err != nil {
			t.Fatalf("read file error: %s", err)
		}
		decodedMessages := decodeMessages(messages)
		totalMessages += len(decodedMessages)
	}
	if len(dirlist) == 0 {
		t.Fatalf("expected multiple files in directory, got %d", len(dirlist))
	}

	if totalMessages != totalMessagesToGenerate {
		t.Fatalf("Tried to generate total message count of: %d; got: %d", totalMessagesToGenerate, totalMessages)
	}
}
|
||
// TestIngestionMoreMessages is a load-style test ingesting 10 million
// single-message payloads and verifying they all survive the flush to
// storage. Skipped by default because of its runtime.
func TestIngestionMoreMessages(t *testing.T) {
	t.Skip() // we can skip this for general unit testing
	totalMessagesToGenerate := 10000000 // 10,000,000
	storage := &memorystorage.MockMemoryStorage{}
	o := NewWithoutMonitor(storage, MAX_BUFFER_SIZE)
	payload := IncomingData{
		{
			"date": 1720613813.197045,
			"log":  "this is string: ",
		},
	}
	payloadBytes, err := json.Marshal(payload)
	if err != nil {
		t.Fatalf("marshal error: %s", err)
	}

	for i := 0; i < totalMessagesToGenerate; i++ {
		data := io.NopCloser(bytes.NewReader(payloadBytes))
		err := o.Ingest(data)
		if err != nil {
			t.Fatalf("ingest error: %s", err)
		}
	}

	err = o.Flush()
	if err != nil {
		t.Fatalf("flush error: %s", err)
	}

	dirlist, err := storage.ReadDir("")
	if err != nil {
		t.Fatalf("read dir error: %s", err)
	}

	// count every message persisted across all flushed files
	totalMessages := 0
	for _, file := range dirlist {
		messages, err := storage.ReadFile(file)
		if err != nil {
			t.Fatalf("read file error: %s", err)
		}
		decodedMessages := decodeMessages(messages)
		totalMessages += len(decodedMessages)
	}
	if len(dirlist) == 0 {
		t.Fatalf("expected multiple files in directory, got %d", len(dirlist))
	}

	if totalMessages != totalMessagesToGenerate {
		t.Fatalf("Tried to generate total message count of: %d; got: %d", totalMessagesToGenerate, totalMessages)
	}
	fmt.Printf("Buffer size (read+unread): %d in %d files\n", o.Buffer.Cap(), len(dirlist))

}
|
||
func BenchmarkIngest10000000(b *testing.B) { | ||
totalMessagesToGenerate := 10000000 // 10,000,000 | ||
storage := &memorystorage.MockMemoryStorage{} | ||
o := NewWithoutMonitor(storage, MAX_BUFFER_SIZE) | ||
payload := IncomingData{ | ||
{ | ||
"date": 1720613813.197045, | ||
"log": "this is string", | ||
}, | ||
} | ||
payloadBytes, err := json.Marshal(payload) | ||
if err != nil { | ||
b.Fatalf("marshal error: %s", err) | ||
} | ||
|
||
for i := 0; i < totalMessagesToGenerate; i++ { | ||
data := io.NopCloser(bytes.NewReader(payloadBytes)) | ||
err := o.Ingest(data) | ||
if err != nil { | ||
b.Fatalf("ingest error: %s", err) | ||
} | ||
} | ||
|
||
// wait until all data is flushed | ||
o.ActiveBufferWriters.Wait() | ||
|
||
// flush remaining data that hasn't been flushed | ||
if n := o.Buffer.Len(); n >= 0 { | ||
err := o.WriteBufferToStorage(int64(n)) | ||
if err != nil { | ||
b.Fatalf("write log buffer to storage error (buffer: %d): %s", o.Buffer.Len(), err) | ||
} | ||
} | ||
} | ||
|
||
// BenchmarkIngest100000000 ingests single-message payloads and flushes the
// remainder to storage.
//
// NOTE(review): despite the name, totalMessagesToGenerate is 10,000,000 —
// identical to BenchmarkIngest10000000 (only o.Storage is additionally set
// here); confirm whether this duplicate is intentional. It also ignores b.N,
// so the harness cannot calibrate the run.
func BenchmarkIngest100000000(b *testing.B) {
	totalMessagesToGenerate := 10000000 // 10,000,000
	storage := &memorystorage.MockMemoryStorage{}
	o := NewWithoutMonitor(storage, MAX_BUFFER_SIZE)
	o.Storage = storage
	payload := IncomingData{
		{
			"date": 1720613813.197045,
			"log":  "this is string",
		},
	}
	payloadBytes, err := json.Marshal(payload)
	if err != nil {
		b.Fatalf("marshal error: %s", err)
	}

	for i := 0; i < totalMessagesToGenerate; i++ {
		data := io.NopCloser(bytes.NewReader(payloadBytes))
		err := o.Ingest(data)
		if err != nil {
			b.Fatalf("ingest error: %s", err)
		}
	}

	// wait until all data is flushed
	o.ActiveBufferWriters.Wait()

	// flush remaining data that hasn't been flushed
	if n := o.Buffer.Len(); n >= 0 {
		err := o.WriteBufferToStorage(int64(n))
		if err != nil {
			b.Fatalf("write log buffer to storage error (buffer: %d): %s", o.Buffer.Len(), err)
		}
	}
}
|
||
func TestEnsurePath(t *testing.T) { | ||
storage := &memorystorage.MockMemoryStorage{} | ||
err := ensurePath(storage, "a/b/c/filename.txt") | ||
if err != nil { | ||
t.Fatalf("error: %s", err) | ||
} | ||
} | ||
|
||
// TestMergeBufferPosAndPrefix verifies that consecutive entries with the
// same prefix are collapsed into one entry with summed offsets, while runs
// with different prefixes stay separate.
func TestMergeBufferPosAndPrefix(t *testing.T) {
	testCase1 := []BufferPosAndPrefix{
		{
			prefix: "abc",
			offset: 3,
		},
		{
			prefix: "abc",
			offset: 9,
		},
		{
			prefix: "abc",
			offset: 2,
		},
		{
			prefix: "abc2",
			offset: 3,
		},
		{
			prefix: "abc2",
			offset: 2,
		},
		{
			prefix: "abc3",
			offset: 2,
		},
	}
	// "abc" run sums to 3+9+2=14, "abc2" to 3+2=5, "abc3" stays 2
	expected1 := []BufferPosAndPrefix{
		{
			prefix: "abc",
			offset: 14,
		},
		{
			prefix: "abc2",
			offset: 5,
		},
		{
			prefix: "abc3",
			offset: 2,
		},
	}
	res := mergeBufferPosAndPrefix(testCase1)
	if !slices.Equal(res, expected1) {
		t.Fatalf("test case is not equal to expected\nGot: %+v\nExpected:%+v\n", res, expected1)
	}
}
|
||
func TestReadPrefix(t *testing.T) { | ||
storage := &memorystorage.MockMemoryStorage{} | ||
o := NewWithoutMonitor(storage, MAX_BUFFER_SIZE) | ||
o.Buffer.prefix = []BufferPosAndPrefix{ | ||
{ | ||
prefix: "abc", | ||
offset: 3, | ||
}, | ||
{ | ||
prefix: "abc", | ||
offset: 9, | ||
}, | ||
{ | ||
prefix: "abc", | ||
offset: 2, | ||
}, | ||
{ | ||
prefix: "abc2", | ||
offset: 3, | ||
}, | ||
{ | ||
prefix: "abc2", | ||
offset: 2, | ||
}, | ||
{ | ||
prefix: "abc3", | ||
offset: 2, | ||
}, | ||
} | ||
expected1 := []BufferPosAndPrefix{ | ||
{ | ||
prefix: "abc", | ||
offset: 3, | ||
}, | ||
{ | ||
prefix: "abc", | ||
offset: 9, | ||
}, | ||
{ | ||
prefix: "abc", | ||
offset: 2, | ||
}, | ||
} | ||
expected2 := []BufferPosAndPrefix{ | ||
{ | ||
prefix: "abc2", | ||
offset: 3, | ||
}, | ||
} | ||
res := o.Buffer.ReadPrefix(int64(o.Buffer.prefix[0].offset + o.Buffer.prefix[1].offset + o.Buffer.prefix[2].offset)) | ||
if !slices.Equal(res, expected1) { | ||
t.Fatalf("test case is not equal to expected\nGot: %+v\nExpected:%+v\n", res, expected1) | ||
} | ||
res2 := o.Buffer.ReadPrefix(3) | ||
if !slices.Equal(res2, expected2) { | ||
t.Fatalf("test case is not equal to expected\nGot: %+v\nExpected:%+v\n", res, expected2) | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,8 @@ | ||
package observability | ||
|
||
// MAX_BUFFER_SIZE is the ingest buffer threshold (in bytes) that triggers an
// asynchronous flush to storage.
const MAX_BUFFER_SIZE = 1024 * 1024 // 1 MB
// FLUSH_TIME_MAX_MINUTES is the maximum age of unflushed buffered data before
// the buffer monitor forces a flush.
const FLUSH_TIME_MAX_MINUTES = 1 // should have 5 as default at release

// TIMESTAMP_FORMAT is the reference layout for log entry timestamps returned
// by the API.
const TIMESTAMP_FORMAT = "2006-01-02T15:04:05"

// DATE_PREFIX is the reference layout for the per-day storage directory
// prefix under which log files are written.
const DATE_PREFIX = "2006/01/02"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,24 @@ | ||
package observability | ||
|
||
import ( | ||
"bytes" | ||
"encoding/binary" | ||
"math" | ||
) | ||
|
||
// encodeMessage serializes messages into the internal binary log format: per
// message, an 8-byte little-endian IEEE-754 date, then key/value pairs each
// followed by a 0xff delimiter, then one final 0xff terminating the message.
//
// NOTE(review): a raw 0xff byte inside a key or value would corrupt the
// framing — presumably log data is UTF-8 text where 0xff cannot occur, but
// confirm against the decoder. Map iteration order also makes the key order
// non-deterministic between encodes of the same message.
func encodeMessage(msgs []FluentBitMessage) []byte {
	out := bytes.Buffer{}
	for _, msg := range msgs {
		var buf [8]byte
		binary.LittleEndian.PutUint64(buf[:], math.Float64bits(msg.Date))
		out.Write(buf[:])
		for key, msgData := range msg.Data {
			out.Write([]byte(key))
			out.Write([]byte{0xff})
			out.Write([]byte(msgData))
			out.Write([]byte{0xff})
		}
		out.Write([]byte{0xff})
	}
	return out.Bytes()
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,21 +1,96 @@ | ||
package observability | ||
|
||
import ( | ||
"encoding/json" | ||
"fmt" | ||
"net/http" | ||
"strconv" | ||
"strings" | ||
"time" | ||
) | ||
|
||
// observabilityHandler is the catch-all route for the observability API; no
// resource lives at the root, so it always answers 404.
func (o *Observability) observabilityHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusNotFound)
}
|
||
func (o *Observability) ingestionHandler(w http.ResponseWriter, r *http.Request) { | ||
msgs, err := Decode(r.Body) | ||
if err != nil { | ||
if r.Method != http.MethodPost { | ||
w.WriteHeader(http.StatusBadRequest) | ||
return | ||
} | ||
|
||
if err := o.Ingest(r.Body); err != nil { | ||
w.WriteHeader(http.StatusBadRequest) | ||
fmt.Printf("error: %s", err) | ||
return | ||
} | ||
fmt.Printf("Got msgs: %+v\n", msgs) | ||
w.WriteHeader(http.StatusOK) | ||
} | ||
|
||
func (o *Observability) logsHandler(w http.ResponseWriter, r *http.Request) { | ||
if r.Method != http.MethodGet { | ||
w.WriteHeader(http.StatusBadRequest) | ||
return | ||
} | ||
if r.FormValue("fromDate") == "" { | ||
o.returnError(w, fmt.Errorf("no from date supplied"), http.StatusBadRequest) | ||
return | ||
} | ||
fromDate, err := time.Parse("2006-01-02", r.FormValue("fromDate")) | ||
if err != nil { | ||
o.returnError(w, fmt.Errorf("invalid date: %s", err), http.StatusBadRequest) | ||
return | ||
} | ||
if r.FormValue("endDate") == "" { | ||
o.returnError(w, fmt.Errorf("no end date supplied"), http.StatusBadRequest) | ||
return | ||
} | ||
endDate, err := time.Parse("2006-01-02", r.FormValue("endDate")) | ||
if err != nil { | ||
o.returnError(w, fmt.Errorf("invalid date: %s", err), http.StatusBadRequest) | ||
return | ||
} | ||
offset := 0 | ||
if r.FormValue("offset") != "" { | ||
i, err := strconv.Atoi(r.FormValue("offset")) | ||
if err == nil { | ||
offset = i | ||
} | ||
} | ||
maxLines := 0 | ||
if r.FormValue("maxLines") != "" { | ||
i, err := strconv.Atoi(r.FormValue("maxLines")) | ||
if err == nil { | ||
maxLines = i | ||
} | ||
} | ||
pos := int64(0) | ||
if r.FormValue("pos") != "" { | ||
i, err := strconv.ParseInt(r.FormValue("pos"), 10, 64) | ||
if err == nil { | ||
pos = i | ||
} | ||
} | ||
displayTags := strings.Split(r.FormValue("display-tags"), ",") | ||
filterTagsSplit := strings.Split(r.FormValue("filter-tags"), ",") | ||
filterTags := []KeyValue{} | ||
for _, tag := range filterTagsSplit { | ||
kv := strings.Split(tag, "=") | ||
if len(kv) == 2 { | ||
filterTags = append(filterTags, KeyValue{Key: kv[0], Value: kv[1]}) | ||
} | ||
} | ||
out, err := o.getLogs(fromDate, endDate, pos, maxLines, offset, r.FormValue("search"), displayTags, filterTags) | ||
if err != nil { | ||
w.WriteHeader(http.StatusBadRequest) | ||
fmt.Printf("get logs error: %s", err) | ||
return | ||
} | ||
outBytes, err := json.Marshal(out) | ||
if err != nil { | ||
w.WriteHeader(http.StatusBadRequest) | ||
fmt.Printf("marshal error: %s", err) | ||
return | ||
} | ||
w.Write(outBytes) | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,66 @@ | ||
package observability | ||
|
||
import ( | ||
"bytes" | ||
"encoding/json" | ||
"net/http" | ||
"net/http/httptest" | ||
"testing" | ||
|
||
memorystorage "github.com/in4it/wireguard-server/pkg/storage/memory" | ||
) | ||
|
||
// TestIngestionHandler POSTs a single-message JSON batch at the ingestion
// endpoint, forces a flush to the mock storage, and verifies the persisted
// message decodes back with its original date and log line.
func TestIngestionHandler(t *testing.T) {
	storage := &memorystorage.MockMemoryStorage{}
	o := NewWithoutMonitor(storage, 20)
	o.Storage = storage
	payload := IncomingData{
		{
			"date": 1720613813.197045,
			"log":  "this is a string",
		},
	}

	payloadBytes, err := json.Marshal(payload)
	if err != nil {
		t.Fatalf("marshal error: %s", err)
	}
	req := httptest.NewRequest(http.MethodPost, "/api/observability/ingestion/json", bytes.NewReader(payloadBytes))
	w := httptest.NewRecorder()
	o.ingestionHandler(w, req)
	res := w.Result()

	if res.StatusCode != http.StatusOK {
		t.Fatalf("expected status code OK. Got: %d", res.StatusCode)
	}

	// wait until all data is flushed
	o.ActiveBufferWriters.Wait()

	// flush remaining data that hasn't been flushed
	if n := o.Buffer.Len(); n >= 0 {
		err := o.WriteBufferToStorage(int64(n))
		if err != nil {
			t.Fatalf("write log buffer to storage error (buffer: %d): %s", o.Buffer.Len(), err)
		}
	}

	dirlist, err := storage.ReadDir("")
	if err != nil {
		t.Fatalf("read dir error: %s", err)
	}
	if len(dirlist) == 0 {
		t.Fatalf("dir is empty")
	}
	messages, err := storage.ReadFile(dirlist[0])
	if err != nil {
		t.Fatalf("read file error: %s", err)
	}
	decodedMessages := decodeMessages(messages)
	if decodedMessages[0].Date != 1720613813.197045 {
		t.Fatalf("unexpected date. Got %f, expected: %f", decodedMessages[0].Date, 1720613813.197045)
	}
	if decodedMessages[0].Data["log"] != "this is a string" {
		t.Fatalf("unexpected log data")
	}
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,31 @@ | ||
package observability | ||
|
||
import ( | ||
"fmt" | ||
"math" | ||
"net/http" | ||
"strings" | ||
"time" | ||
) | ||
|
||
func (o *Observability) returnError(w http.ResponseWriter, err error, statusCode int) { | ||
fmt.Println("========= ERROR =========") | ||
fmt.Printf("Error: %s\n", err) | ||
fmt.Println("=========================") | ||
w.WriteHeader(statusCode) | ||
w.Write([]byte(`{"error": "` + strings.Replace(err.Error(), `"`, `\"`, -1) + `"}`)) | ||
} | ||
|
||
// FloatToDate converts a fractional UNIX timestamp (seconds, with the
// fraction carrying sub-second precision) into a time.Time. The fraction is
// rounded to whole microseconds before conversion.
func FloatToDate(datetime float64) time.Time {
	datetimeInt := int64(datetime)
	decimals := datetime - float64(datetimeInt)
	// note: value is microseconds despite the name; converted below
	nsecs := int64(math.Round(decimals * 1_000_000)) // precision to match golang's time.Time
	return time.Unix(datetimeInt, nsecs*1000)
}

// DateToFloat converts a time.Time into a fractional UNIX timestamp
// (whole seconds plus nanoseconds as the fraction). Inverse of FloatToDate
// up to microsecond precision.
//
// Fix: removed a leftover debug fmt.Printf("nanosec: %f", ...) that wrote to
// stdout on every call.
func DateToFloat(datetime time.Time) float64 {
	seconds := float64(datetime.Unix())
	nanoseconds := float64(datetime.Nanosecond()) / 1e9
	return seconds + nanoseconds
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,15 @@ | ||
package observability | ||
|
||
import ( | ||
"testing" | ||
"time" | ||
) | ||
|
||
func TestFloatToDate2Way(t *testing.T) { | ||
now := time.Now() | ||
float := DateToFloat(now) | ||
date := FloatToDate(float) | ||
if date.Format(TIMESTAMP_FORMAT) != now.Format(TIMESTAMP_FORMAT) { | ||
t.Fatalf("got: %s, expected: %s", date.Format(TIMESTAMP_FORMAT), now.Format(TIMESTAMP_FORMAT)) | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,116 @@ | ||
package observability | ||
|
||
import ( | ||
"bufio" | ||
"fmt" | ||
"sort" | ||
"strings" | ||
"time" | ||
) | ||
|
||
// getLogs scans the per-day log files between fromDate and endDate
// (inclusive), starting at byte position pos, and returns up to maxLogLines
// entries matching the search substring and filter tags, plus per-tag
// occurrence counts and the position at which the next page should resume.
//
// offset is a minute offset (presumably a timezone adjustment — confirm with
// callers) applied to each displayed timestamp.
func (o *Observability) getLogs(fromDate, endDate time.Time, pos int64, maxLogLines, offset int, search string, displayTags []string, filterTags []KeyValue) (LogEntryResponse, error) {
	logEntryResponse := LogEntryResponse{
		Enabled:    true,
		LogEntries: []LogEntry{},
		Tags:       KeyValueInt{},
	}

	// occurrence count per (key, value) tag pair across matched entries
	keys := make(map[KeyValue]int)

	logFiles := []string{}

	if maxLogLines == 0 {
		maxLogLines = 100
	}

	// collect the file list for every day in the inclusive date range
	for d := fromDate; d.Before(endDate) || d.Equal(endDate); d = d.AddDate(0, 0, 1) {
		fileList, err := o.Storage.ReadDir(d.Format(DATE_PREFIX))
		if err != nil {
			logEntryResponse.NextPos = -1
			return logEntryResponse, nil // can't read directory, return empty response
		}
		for _, filename := range fileList {
			logFiles = append(logFiles, d.Format(DATE_PREFIX)+"/"+filename)
		}
	}

	fileReaders, err := o.Storage.OpenFilesFromPos(logFiles, pos)
	if err != nil {
		return logEntryResponse, fmt.Errorf("error while reading files: %s", err)
	}
	// defers run at function return, not per loop iteration — acceptable here
	// since all readers are needed until this function ends
	for _, fileReader := range fileReaders {
		defer fileReader.Close()
	}

	for _, logInputData := range fileReaders { // read multiple files
		if len(logEntryResponse.LogEntries) >= maxLogLines {
			break
		}
		scanner := bufio.NewScanner(logInputData)
		// custom split function: advance pos past every scanned message so
		// NextPos reflects where the next page should resume
		scanner.Split(func(data []byte, atEOF bool) (advance int, token []byte, err error) {
			advance, token, err = scanMessage(data, atEOF)
			pos += int64(advance)
			return
		})
		for scanner.Scan() && len(logEntryResponse.LogEntries) < maxLogLines { // read multiple lines
			// decode, store as logentry
			logMessage := decodeMessage(scanner.Bytes())
			logline, ok := logMessage.Data["log"]
			if ok {
				timestamp := FloatToDate(logMessage.Date).Add(time.Duration(offset) * time.Minute)
				if search == "" || strings.Contains(logline, search) {
					// collect the requested display tags present on this entry
					tags := []KeyValue{}
					for _, tag := range displayTags {
						if tagValue, ok := logMessage.Data[tag]; ok {
							tags = append(tags, KeyValue{Key: tag, Value: tagValue})
						}
					}
					// when filter tags are set, keep entries matching at
					// least one key=value pair (OR semantics)
					filterMessage := true
					if len(filterTags) == 0 {
						filterMessage = false
					} else {
						for _, filter := range filterTags {
							if tagValue, ok := logMessage.Data[filter.Key]; ok {
								if tagValue == filter.Value {
									filterMessage = false
								}
							}
						}
					}
					if !filterMessage {
						logEntry := LogEntry{
							Timestamp: timestamp.Format(TIMESTAMP_FORMAT),
							Data:      logline,
							Tags:      tags,
						}
						logEntryResponse.LogEntries = append(logEntryResponse.LogEntries, logEntry)
						// tally every non-log attribute for the tag summary
						for k, v := range logMessage.Data {
							if k != "log" {
								keys[KeyValue{Key: k, Value: v}] += 1
							}
						}
					}
				}
			}
		}
		if err := scanner.Err(); err != nil {
			return logEntryResponse, fmt.Errorf("log file read (scanner) error: %s", err)
		}
	}
	// a full page implies there may be more data at pos; otherwise signal end
	if len(logEntryResponse.LogEntries) < maxLogLines {
		logEntryResponse.NextPos = -1 // no more records
	} else {
		logEntryResponse.NextPos = pos
	}

	for k, v := range keys {
		logEntryResponse.Tags = append(logEntryResponse.Tags, KeyValueTotal{
			Key:   k.Key,
			Value: k.Value,
			Total: v,
		})
	}
	sort.Sort(logEntryResponse.Tags)

	return logEntryResponse, nil
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,96 @@ | ||
package observability | ||
|
||
import ( | ||
"bytes" | ||
"encoding/json" | ||
"io" | ||
"strconv" | ||
"strings" | ||
"testing" | ||
"time" | ||
|
||
"github.com/in4it/wireguard-server/pkg/logging" | ||
memorystorage "github.com/in4it/wireguard-server/pkg/storage/memory" | ||
) | ||
|
||
func TestGetLogs(t *testing.T) { | ||
logging.Loglevel = logging.LOG_DEBUG | ||
totalMessagesToGenerate := 100 | ||
storage := &memorystorage.MockMemoryStorage{} | ||
o := NewWithoutMonitor(storage, 20) | ||
timestamp := DateToFloat(time.Now()) | ||
payload := IncomingData{ | ||
{ | ||
"date": timestamp, | ||
"log": "this is string: ", | ||
}, | ||
} | ||
|
||
for i := 0; i < totalMessagesToGenerate; i++ { | ||
payload[0]["log"] = "this is string: " + strconv.Itoa(i) | ||
payloadBytes, err := json.Marshal(payload) | ||
if err != nil { | ||
t.Fatalf("marshal error: %s", err) | ||
} | ||
data := io.NopCloser(bytes.NewReader(payloadBytes)) | ||
err = o.Ingest(data) | ||
if err != nil { | ||
t.Fatalf("ingest error: %s", err) | ||
} | ||
} | ||
|
||
// wait until all data is flushed | ||
o.ActiveBufferWriters.Wait() | ||
|
||
// flush remaining data that hasn't been flushed | ||
if n := o.Buffer.Len(); n >= 0 { | ||
err := o.WriteBufferToStorage(int64(n)) | ||
if err != nil { | ||
t.Fatalf("write log buffer to storage error (buffer: %d): %s", o.Buffer.Len(), err) | ||
} | ||
} | ||
|
||
now := time.Now() | ||
maxLogLines := 100 | ||
search := "" | ||
|
||
logEntryResponse, err := o.getLogs(now, now, 0, maxLogLines, 0, search, []string{}, []KeyValue{}) | ||
if err != nil { | ||
t.Fatalf("get logs error: %s", err) | ||
} | ||
if len(logEntryResponse.LogEntries) != totalMessagesToGenerate { | ||
t.Fatalf("didn't get the same log entries as messaged we generated: got: %d, expected: %d", len(logEntryResponse.LogEntries), totalMessagesToGenerate) | ||
} | ||
if logEntryResponse.LogEntries[0].Timestamp != FloatToDate(timestamp).Format(TIMESTAMP_FORMAT) { | ||
t.Fatalf("unexpected timestamp: %s vs %s", logEntryResponse.LogEntries[0].Timestamp, FloatToDate(timestamp).Format(TIMESTAMP_FORMAT)) | ||
} | ||
} | ||
|
||
func TestFloatToDate(t *testing.T) { | ||
for i := 0; i < 10; i++ { | ||
now := time.Now() | ||
floatDate := float64(now.Unix()) + float64(now.Nanosecond())/1e9 | ||
floatToDate := FloatToDate(floatDate) | ||
if now.Unix() != floatToDate.Unix() { | ||
t.Fatalf("times are not equal. Got: %v, expected: %v", floatToDate, now) | ||
} | ||
/*if now.UnixNano() != floatToDate.UnixNano() { | ||
t.Fatalf("times are not equal. Got: %v, expected: %v", floatToDate, now) | ||
}*/ | ||
} | ||
} | ||
|
||
func TestKeyValue(t *testing.T) { | ||
logEntryResponse := LogEntryResponse{ | ||
Tags: KeyValueInt{ | ||
{Key: "k", Value: "v", Total: 4}, | ||
}, | ||
} | ||
out, err := json.Marshal(logEntryResponse) | ||
if err != nil { | ||
t.Fatalf("error: %s", err) | ||
} | ||
if !strings.Contains(string(out), `"tags":[{"key":"k","value":"v","total":4}]`) { | ||
t.Fatalf("wrong output: %s", out) | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,89 @@ | ||
package observability | ||
|
||
import ( | ||
"bytes" | ||
"strconv" | ||
"strings" | ||
"sync" | ||
"sync/atomic" | ||
"time" | ||
|
||
"github.com/in4it/wireguard-server/pkg/storage" | ||
) | ||
|
||
// IncomingData is a raw fluent-bit payload: a batch of loosely-typed message
// maps (each carrying at least "date" and "log" keys, as exercised by
// TestGetLogs).
type IncomingData []map[string]any
|
||
type FluentBitMessage struct { | ||
Date float64 `json:"date"` | ||
Data map[string]any `json:"data"` | ||
Date float64 `json:"date"` | ||
Data map[string]string `json:"data"` | ||
} | ||
|
||
// Observability buffers incoming log data in memory and flushes it to the
// configured storage backend.
type Observability struct {
	Storage storage.Iface       // destination for flushed log data
	Buffer  *ConcurrentRWBuffer // mutex-guarded staging buffer for ingested messages
	LastFlushed time.Time       // time of the most recent flush -- TODO confirm against flush logic
	FlushOverflow atomic.Bool   // presumably set while an overflow flush is in progress; verify in WriteBufferToStorage
	FlushOverflowSequence atomic.Uint64 // presumably distinguishes consecutive overflow flushes; verify
	// ActiveBufferWriters tracks in-flight buffer writers so a flush can wait
	// for them (see TestGetLogs, which Waits before flushing).
	ActiveBufferWriters sync.WaitGroup
	WriteLock sync.Mutex
	MaxBufferSize int // buffer size threshold in bytes -- assumed; confirm against callers of NewWithoutMonitor
}
|
||
// ConcurrentRWBuffer is a bytes.Buffer guarded by a mutex so multiple
// goroutines can append safely, together with the per-write prefix/offset
// bookkeeping.
type ConcurrentRWBuffer struct {
	buffer bytes.Buffer
	prefix []BufferPosAndPrefix // ordered record of prefixes and their offsets within buffer
	mu     sync.Mutex
}

// BufferPosAndPrefix associates a storage prefix with a byte offset in the
// buffer -- presumably marking where that prefix's data begins; verify
// against the flush code.
type BufferPosAndPrefix struct {
	prefix string
	offset int
}
|
||
// LogEntryResponse is the payload returned by the logs API endpoint.
type LogEntryResponse struct {
	Enabled    bool        `json:"enabled"`
	LogEntries []LogEntry  `json:"logEntries"`
	Tags       KeyValueInt `json:"tags"`
	// NextPos is the pagination cursor for the next request; -1 means there
	// are no more records.
	NextPos int64 `json:"nextPos"`
}

// LogEntry is a single log line with its formatted timestamp and the tags
// selected for display.
type LogEntry struct {
	Timestamp string     `json:"timestamp"` // formatted with TIMESTAMP_FORMAT
	Data      string     `json:"data"`
	Tags      []KeyValue `json:"tags"`
}
|
||
// KeyValueInt is a sortable list of tag key/value pairs with occurrence
// counts; it has a custom JSON marshaler and implements sort.Interface.
type KeyValueInt []KeyValueTotal

// KeyValueTotal is a tag key/value pair plus how many times it occurred.
// JSON field names come from KeyValueInt.MarshalJSON, not struct tags.
type KeyValueTotal struct {
	Key   string
	Value string
	Total int
}

// KeyValue is a plain tag key/value pair.
type KeyValue struct {
	Key   string `json:"key"`
	Value string `json:"value"`
}
|
||
func (kv KeyValueInt) MarshalJSON() ([]byte, error) { | ||
res := "[" | ||
for _, v := range kv { | ||
res += `{ "key" : "` + v.Key + `", "value": "` + v.Value + `", "total": ` + strconv.Itoa(v.Total) + ` },` | ||
} | ||
res = strings.TrimRight(res, ",") | ||
res += "]" | ||
return []byte(res), nil | ||
} | ||
|
||
func (kv KeyValueInt) Len() int { | ||
return len(kv) | ||
} | ||
func (kv KeyValueInt) Less(i, j int) bool { | ||
if kv[i].Key == kv[j].Key { | ||
return kv[i].Value < kv[j].Value | ||
} | ||
return kv[i].Key < kv[j].Key | ||
} | ||
func (kv KeyValueInt) Swap(i, j int) { | ||
kv[i], kv[j] = kv[j], kv[i] | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,25 @@ | ||
package s3storage | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
"strings" | ||
|
||
"github.com/aws/aws-sdk-go-v2/aws" | ||
"github.com/aws/aws-sdk-go-v2/service/s3" | ||
) | ||
|
||
func (s *S3Storage) ReadDir(pathname string) ([]string, error) { | ||
objectList, err := s.s3Client.ListObjectsV2(context.TODO(), &s3.ListObjectsV2Input{ | ||
Bucket: aws.String(s.bucketname), | ||
Prefix: aws.String(s.prefix + "/" + strings.TrimLeft(pathname, "/")), | ||
}) | ||
if err != nil { | ||
return []string{}, fmt.Errorf("list object error: %s", err) | ||
} | ||
res := make([]string, len(objectList.Contents)) | ||
for k, object := range objectList.Contents { | ||
res[k] = *object.Key | ||
} | ||
return res, nil | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,23 @@ | ||
package s3storage | ||
|
||
import ( | ||
"context" | ||
"fmt" | ||
|
||
"github.com/aws/aws-sdk-go-v2/config" | ||
"github.com/aws/aws-sdk-go-v2/service/s3" | ||
) | ||
|
||
func New(bucketname, prefix string) (*S3Storage, error) { | ||
sdkConfig, err := config.LoadDefaultConfig(context.TODO()) | ||
if err != nil { | ||
return nil, fmt.Errorf("config load error: %s", err) | ||
} | ||
s3Client := s3.NewFromConfig(sdkConfig) | ||
|
||
return &S3Storage{ | ||
bucketname: bucketname, | ||
prefix: prefix, | ||
s3Client: s3Client, | ||
}, nil | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,42 @@ | ||
package s3storage | ||
|
||
import ( | ||
"io/fs" | ||
"strings" | ||
) | ||
|
||
// FileExists always reports false; existence checks are not implemented for
// S3 yet. NOTE(review): callers using this to detect existing objects will
// always get false — confirm this is acceptable or implement with HeadObject.
func (l *S3Storage) FileExists(filename string) bool {
	return false
}

// ConfigPath returns the object key for filename under the fixed CONFIG_PATH
// directory, stripping any leading slashes from filename.
func (l *S3Storage) ConfigPath(filename string) string {
	return CONFIG_PATH + "/" + strings.TrimLeft(filename, "/")
}

// GetPath returns the configured key prefix of this storage backend.
func (s *S3Storage) GetPath() string {
	return s.prefix
}

// EnsurePath is a no-op: S3 has no directories to create.
func (l *S3Storage) EnsurePath(pathname string) error {
	return nil
}

// EnsureOwnership is a no-op for S3; per-file ownership does not apply here.
func (l *S3Storage) EnsureOwnership(filename, login string) error {
	return nil
}

// Remove is not implemented: it reports success without deleting anything.
// TODO: implement with DeleteObject.
func (l *S3Storage) Remove(name string) error {
	return nil
}

// Rename is not implemented: it reports success without renaming anything.
// TODO: implement with CopyObject + DeleteObject.
func (l *S3Storage) Rename(oldName, newName string) error {
	return nil
}

// EnsurePermissions is a no-op; POSIX file modes do not apply to S3 objects.
func (l *S3Storage) EnsurePermissions(name string, mode fs.FileMode) error {
	return nil
}

// FileInfo is not implemented and returns (nil, nil). NOTE(review): callers
// must nil-check the fs.FileInfo, since no error is returned either.
func (l *S3Storage) FileInfo(name string) (fs.FileInfo, error) {
	return nil, nil
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,17 @@ | ||
package s3storage | ||
|
||
import ( | ||
"io" | ||
) | ||
|
||
// ReadFile is not implemented yet and returns (nil, nil). NOTE(review):
// callers must nil-check the byte slice, since no error is returned either.
// TODO: implement with GetObject.
func (l *S3Storage) ReadFile(name string) ([]byte, error) {
	return nil, nil
}

// OpenFilesFromPos is not implemented yet and returns (nil, nil).
func (l *S3Storage) OpenFilesFromPos(names []string, pos int64) ([]io.ReadCloser, error) {
	return nil, nil
}

// OpenFile is not implemented yet and returns (nil, nil).
func (l *S3Storage) OpenFile(name string) (io.ReadCloser, error) {
	return nil, nil
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,11 @@ | ||
package s3storage | ||
|
||
import "github.com/aws/aws-sdk-go-v2/service/s3"

// CONFIG_PATH is the key prefix used for configuration objects (see
// ConfigPath).
const CONFIG_PATH = "config"

// S3Storage implements the storage interface on top of a single S3 bucket.
// prefix is the base key path used by ReadDir; see WriteFile for a case
// where it is not applied.
type S3Storage struct {
	bucketname string
	prefix     string
	s3Client   *s3.Client
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,35 @@ | ||
package s3storage | ||
|
||
import ( | ||
"bytes" | ||
"context" | ||
"fmt" | ||
"io" | ||
|
||
"github.com/aws/aws-sdk-go-v2/aws" | ||
"github.com/aws/aws-sdk-go-v2/service/s3" | ||
) | ||
|
||
func (s *S3Storage) WriteFile(name string, data []byte) error { | ||
_, err := s.s3Client.PutObject(context.TODO(), &s3.PutObjectInput{ | ||
Bucket: aws.String(s.bucketname), | ||
Key: aws.String(name), | ||
Body: bytes.NewReader(data), | ||
}) | ||
if err != nil { | ||
return fmt.Errorf("put object error: %s", err) | ||
} | ||
return nil | ||
} | ||
|
||
// AppendFile is not implemented: it reports success without writing. S3 has
// no native append; TODO: implement as read-modify-write or multipart.
func (s *S3Storage) AppendFile(name string, data []byte) error {
	return nil
}

// OpenFileForWriting is not implemented yet and returns (nil, nil).
// NOTE(review): callers must nil-check the WriteCloser.
func (s *S3Storage) OpenFileForWriting(name string) (io.WriteCloser, error) {
	return nil, nil
}

// OpenFileForAppending is not implemented yet and returns (nil, nil).
func (s *S3Storage) OpenFileForAppending(name string) (io.WriteCloser, error) {
	return nil, nil
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,210 @@ | ||
import { Card, Container, Text, Table, Title, Button, Grid, Popover, Group, TextInput, rem, ActionIcon, Checkbox, Highlight, MultiSelect} from "@mantine/core"; | ||
import { AppSettings } from "../../Constants/Constants"; | ||
import { useInfiniteQuery } from "@tanstack/react-query"; | ||
import { useAuthContext } from "../../Auth/Auth"; | ||
import { Link, useSearchParams } from "react-router-dom"; | ||
import { TbArrowRight, TbSearch, TbSettings } from "react-icons/tb"; | ||
import { DatePickerInput } from "@mantine/dates"; | ||
import { useEffect, useState } from "react"; | ||
import React from "react"; | ||
|
||
// Shape of one page returned by the /observability/logs endpoint.
type LogsDataResponse = {
  enabled: boolean;
  logEntries: LogEntry[];
  environments: string[];
  // cursor for the next page; -1 means no more records (see getNextPageParam)
  nextPos: number;
  tags: Tags[];
}
// One log line with its timestamp and the tags attached to it.
type LogEntry = {
  data: string;
  timestamp: string;
  tags: Tag[];
}
// Tag aggregate with an occurrence count, used to build the filter/column UI.
type Tags = {
  key: string;
  value: string;
  total: number;
}
// Plain tag key/value pair used for filtering and per-row display.
type Tag = {
  key: string;
  value: string;
}
|
||
function getDate(date:Date) { | ||
var dd = String(date.getDate()).padStart(2, '0'); | ||
var mm = String(date.getMonth() + 1).padStart(2, '0'); //January is 0! | ||
var yyyy = date.getFullYear(); | ||
return yyyy + "-" + mm + '-' + dd; | ||
} | ||
|
||
// Logs page: searchable, filterable, infinitely-scrolling log viewer backed
// by the /observability/logs endpoint.
export function Logs() {
    const {authInfo} = useAuthContext();
    // minutes east of UTC; sent to the backend so it can localize timestamps
    const timezoneOffset = new Date().getTimezoneOffset() * -1
    const [currentQueryParameters] = useSearchParams();
    const dateParam = currentQueryParameters.get("date")
    const [tags, setTags] = useState<Tag[]>([])                 // active filter tags
    const [search, setSearch] = useState<string>("")            // live input value
    const [searchParam, setSearchParam] = useState<string>("")  // committed search term
    const [columns, setColumns] = useState<string[]>([])        // extra tag columns to show
    const [logsDate, setLogsDate] = useState<Date | null>(dateParam === null ? new Date() : new Date(dateParam));
    // Each page is fetched with the nextPos cursor from the previous page;
    // changing date/tags/columns/search resets the query via queryKey.
    const { isPending, fetchNextPage, hasNextPage, error, data } = useInfiniteQuery<LogsDataResponse>({
      queryKey: ['logs', logsDate, tags, columns, searchParam],
      queryFn: async ({ pageParam }) =>
        fetch(AppSettings.url + '/observability/logs?display-tags='+encodeURIComponent(columns.join(","))+'&fromDate='+(logsDate == undefined ? getDate(new Date()) : getDate(logsDate)) + '&endDate='+(logsDate == undefined ? getDate(new Date()) : getDate(logsDate)) + "&pos="+pageParam+"&offset="+timezoneOffset+"&filter-tags="+encodeURIComponent(tags.map(t => t.key + "=" + t.value).join(","))+"&search="+encodeURIComponent(searchParam), {
          headers: {
            "Content-Type": "application/json",
            "Authorization": "Bearer " + authInfo.token
          },
        }).then((res) => {
          return res.json()
        }
      ),
      initialPageParam: 0,
      // nextPos === -1 is the backend's "no more records" sentinel
      getNextPageParam: (lastRequest) => lastRequest.nextPos === -1 ? null : lastRequest.nextPos,
    })

    // Commit the search term when the user presses Enter.
    const captureEnter = (e: React.KeyboardEvent<HTMLDivElement>) => {
      if (e.key === "Enter") {
        setSearchParam(search)
      }
    }

    // Infinite scroll: fetch the next page when scrolled near the bottom.
    useEffect(() => {
      const handleScroll = () => {
        const { scrollTop, clientHeight, scrollHeight } =
          document.documentElement;
        if (scrollTop + clientHeight >= scrollHeight - 20) {
          fetchNextPage();
        }
      };

      window.addEventListener("scroll", handleScroll);
      return () => {
        window.removeEventListener("scroll", handleScroll);
      };
    }, [fetchNextPage])

    if(error) return 'A backend error has occurred: ' + error.message

    // One <React.Fragment> per fetched page; each log entry becomes a table
    // row with timestamp, the selected tag columns, then the log line
    // (highlighted when a search term is active).
    const rows = isPending ? [] : data.pages.map((group, groupIndex) => (
      <React.Fragment key={groupIndex}>
        {group.logEntries.map((row, i) => (
          <Table.Tr key={i}>
            <Table.Td>{row.timestamp}</Table.Td>
            {columns.map(function(column){
              return <Table.Td>{row.tags.filter((tag) => tag.key === column).map((tag => { return tag.value }))}</Table.Td>;
            })}
            <Table.Td>{searchParam === "" ? row.data : <Highlight color="lime" highlight={searchParam}>{row.data}</Highlight>}</Table.Td>
          </Table.Tr>
        ))}
      </React.Fragment>
    ));
    return (
      <Container my={40} size="80rem">
        <Title ta="center" style={{marginBottom: 20}}>
        Logs
        </Title>
        <Grid>
          <Grid.Col span={4}>
            <TextInput
              placeholder="Search..."
              rightSectionWidth={30}
              size="xs"
              leftSection={<TbSearch style={{ width: rem(18), height: rem(18) }} />}
              rightSection={
                <ActionIcon size={18} radius="xl" variant="filled" onClick={() => setSearchParam(search)}>
                  <TbArrowRight style={{ width: rem(14), height: rem(14) }} />
                </ActionIcon>
              }
              onKeyDown={(e) => captureEnter(e)}
              onChange={(e) => setSearch(e.currentTarget.value)}
              value={search}
            />
          </Grid.Col>
          <Grid.Col span={4}>
            <DatePickerInput
              value={logsDate}
              onChange={setLogsDate}
              size="xs"
            />
          </Grid.Col>
          <Grid.Col span={2}>

          </Grid.Col>
          <Grid.Col span={2}>
            <Group>
              <Popover width={300} position="bottom" withArrow shadow="md">
                <Popover.Target>
                  <Button variant="default" size="xs">Columns</Button>
                </Popover.Target>
                <Popover.Dropdown>
                  {data?.pages[0].tags
                    .filter((element, i) => {
                      // tags arrive sorted by key; keep only the first entry
                      // per key so each key appears once in the column list
                      if(i === 0 || element.key !== data?.pages[0].tags[i-1].key) {
                        return true
                      } else {
                        return false
                      }
                    })
                    .map((element) => {
                      return (
                        <Checkbox
                          key={element.key}
                          label={element.key}
                          radius="xs"
                          size="xs"
                          style={{marginBottom: 3}}
                          onChange={(event) => event.currentTarget.checked ? setColumns([...columns, element.key]) : setColumns(columns.filter((column) => { return column !== element.key } ))}
                          checked={columns.some((column) => column === element.key)}
                        />
                      )
                  })}
                </Popover.Dropdown>
              </Popover>
              <Popover width={300} position="bottom" withArrow shadow="md">
                <Popover.Target>
                  <Button variant="default" size="xs">Filter</Button>
                </Popover.Target>
                <Popover.Dropdown>
                  {data?.pages[0].tags.map((element) => {
                      return (
                        <Checkbox
                          key={element.key +"="+element.value}
                          label={element.key + " = " + element.value.substring(0, 10) + (element.value.length > 10 ? "..." : "") + " (" + element.total + ")"}
                          radius="xs"
                          size="xs"
                          style={{marginBottom: 3}}
                          onChange={(event) => event.currentTarget.checked ? setTags([...tags, {key: element.key, value: element.value }]) : setTags(tags.filter((tag) => { return tag.key !== element.key || tag.value !== element.value } ))}
                          checked={tags.some((tag) => tag.key === element.key && tag.value === element.value)}
                        />
                      )
                  })}
                </Popover.Dropdown>
              </Popover>
            </Group>
          </Grid.Col>
        </Grid>
        <Table>
          <Table.Thead>
            <Table.Tr key="heading">
              <Table.Th>Timestamp</Table.Th>
              {columns.map(function(column){
                return <Table.Th>{column}</Table.Th>;
              })}
              <Table.Th>Log</Table.Th>
            </Table.Tr>
          </Table.Thead>
          <Table.Tbody>
            {rows}
          </Table.Tbody>
        </Table>
        <Group justify="center">
          {hasNextPage ? <Button onClick={() => fetchNextPage()} variant="default">Loading more...</Button> : null}
        </Group>

      </Container>

    )
}