Go fmt
leoleovich committed Apr 7, 2017
1 parent aa76065 commit c907ccb
Showing 6 changed files with 104 additions and 106 deletions.
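
The hunks below are formatting-only: gofmt sorts the import block alphabetically, aligns struct fields, and adjusts spacing around binary operators to reflect precedence. One effect visible throughout is the operator rule; as a minimal illustration (this snippet is not from the repository), gofmt keeps the spaces around a lone operator but drops them around the tighter-binding one when precedence levels are mixed:

package main

import "fmt"

func main() {
    i, limit := 3, 10
    // A lone binary operator keeps its surrounding spaces.
    fmt.Println(i + 1) // 4
    // Mixed precedence: the "+" loses its spaces, the "<" keeps them,
    // exactly like numOfMetricFromFile+1 < c.lc.mainBufferSize in client.go.
    fmt.Println(i+1 < limit) // true
}
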
16 changes: 8 additions & 8 deletions client.go
@@ -127,7 +127,7 @@ func (c Client) runClient() {
continue
} else {
// We set dead line for connection to write. It should be the rest of we have for client interval
err := conn.SetWriteDeadline(time.Now().Add(time.Duration(c.conf.ClientSendInterval - c.conf.ConnectTimeout - 1)*time.Second))
err := conn.SetWriteDeadline(time.Now().Add(time.Duration(c.conf.ClientSendInterval-c.conf.ConnectTimeout-1) * time.Second))
if err != nil {
c.lg.Println("Can not set deadline for connection: ", err.Error())
connectionFailed = true
@@ -139,10 +139,10 @@ func (c Client) runClient() {
if !connectionFailed {
retryFileMetrics := readMetricsFromFile(c.conf.RetryFile)
for numOfMetricFromFile, metricFromFile := range retryFileMetrics {
if numOfMetricFromFile + 1 < c.lc.mainBufferSize {
if numOfMetricFromFile+1 < c.lc.mainBufferSize {
err = c.tryToSendToGraphite(metricFromFile, conn)
if err != nil {
c.lg.Printf("Error happened in the middle of writing retry metrics. Resaving %d metrics\n", len(retryFileMetrics) - numOfMetricFromFile)
c.lg.Printf("Error happened in the middle of writing retry metrics. Resaving %d metrics\n", len(retryFileMetrics)-numOfMetricFromFile)
// If we failed to write a metric to graphite - something is wrong with connection
c.saveSliceToRetry(retryFileMetrics[numOfMetricFromFile:])
connectionFailed = true
@@ -152,7 +152,7 @@ }
}

} else {
c.lg.Printf("Can read only %d metrics from %s. Rest will be kept for the next run", numOfMetricFromFile + 1, c.conf.RetryFile)
c.lg.Printf("Can read only %d metrics from %s. Rest will be kept for the next run", numOfMetricFromFile+1, c.conf.RetryFile)
c.saveSliceToRetry(retryFileMetrics[numOfMetricFromFile:])
break
}
@@ -183,20 +183,20 @@ func (c Client) runClient() {

bufSize = len(c.ch)
if !connectionFailed {
for processedMainBuff := 0; processedMainBuff < bufSize; processedMainBuff, processedTotal = processedMainBuff + 1, processedTotal + 1 {
for processedMainBuff := 0; processedMainBuff < bufSize; processedMainBuff, processedTotal = processedMainBuff+1, processedTotal+1 {
if processedTotal < c.lc.mainBufferSize {
err = c.tryToSendToGraphite(<-c.ch, conn)
if err != nil {
c.lg.Printf("Error happened in the middle of writing metrics. Saving %d metrics\n", bufSize - processedMainBuff)
c.saveChannelToRetry(c.ch, bufSize - processedMainBuff)
c.lg.Printf("Error happened in the middle of writing metrics. Saving %d metrics\n", bufSize-processedMainBuff)
c.saveChannelToRetry(c.ch, bufSize-processedMainBuff)
break
}
} else {
/*
Save only data for the moment of run. Concurrent goroutines know no mercy
and they continue to write...
*/
c.saveChannelToRetry(c.ch, bufSize - processedMainBuff)
c.saveChannelToRetry(c.ch, bufSize-processedMainBuff)
break
}
}
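
The first client.go hunk only reformats the write-deadline expression, but the pattern behind it is worth spelling out: dial with a connect timeout, then give the write whatever remains of the send interval. A minimal sketch of that pattern follows; the function, the variable names, and the dialing call are placeholders for illustration, not grafsy's actual code.

package main

import (
    "log"
    "net"
    "time"
)

// sendWithDeadline is a sketch of the deadline arithmetic shown above:
// the connect timeout is spent on dialing, and the remainder of the send
// interval (minus a one-second safety margin) bounds the write.
func sendWithDeadline(addr, payload string, sendInterval, connectTimeout int) error {
    conn, err := net.DialTimeout("tcp", addr, time.Duration(connectTimeout)*time.Second)
    if err != nil {
        return err
    }
    defer conn.Close()

    deadline := time.Now().Add(time.Duration(sendInterval-connectTimeout-1) * time.Second)
    if err := conn.SetWriteDeadline(deadline); err != nil {
        return err
    }
    _, err = conn.Write([]byte(payload))
    return err
}

func main() {
    // Example only: 2003 is the usual Graphite plaintext port.
    if err := sendWithDeadline("127.0.0.1:2003", "test.metric 1 1491555200\n", 10, 2); err != nil {
        log.Println("send failed:", err)
    }
}
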
74 changes: 36 additions & 38 deletions grafsy.go
@@ -1,47 +1,46 @@
package main

import (
"log"
"os"
"sync"
"github.com/BurntSushi/toml"
"flag"
"fmt"
"github.com/BurntSushi/toml"
"github.com/naegelejd/go-acl"
"log"
"net"
"syscall"
"os"
"path/filepath"
"flag"
"regexp"
"github.com/naegelejd/go-acl"
"sync"
"syscall"
)

type Config struct {
Supervisor string
Supervisor string
ClientSendInterval int
MetricsPerSecond int
GraphiteAddr string // Think about multiple servers
ConnectTimeout int
LocalBind string
Log string
MetricDir string
UseACL bool
RetryFile string
SumPrefix string
AvgPrefix string
MinPrefix string
MaxPrefix string
AggrInterval int
AggrPerSecond int
MonitoringPath string
AllowedMetrics string
MetricsPerSecond int
GraphiteAddr string // Think about multiple servers
ConnectTimeout int
LocalBind string
Log string
MetricDir string
UseACL bool
RetryFile string
SumPrefix string
AvgPrefix string
MinPrefix string
MaxPrefix string
AggrInterval int
AggrPerSecond int
MonitoringPath string
AllowedMetrics string
}

type LocalConfig struct {
mainBufferSize int
aggrBufSize int
aggrBufSize int
fileMetricSize int
}


func main() {
var configFile string
flag.StringVar(&configFile, "c", "/etc/grafsy/grafsy.toml", "Path to config file.")
@@ -51,9 +50,9 @@ func main() {
if _, err := toml.DecodeFile(configFile, &conf); err != nil {
fmt.Println("Failed to parse config file", err.Error())
}
f, err := os.OpenFile(conf.Log, os.O_RDWR | os.O_CREATE | os.O_APPEND, 0660)
f, err := os.OpenFile(conf.Log, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0660)
if err != nil {
log.Println("Can not open file "+ conf.Log, err.Error())
log.Println("Can not open file", conf.Log, err.Error())
os.Exit(1)
}
lg := log.New(f, "", log.Ldate|log.Lmicroseconds|log.Lshortfile)
@@ -73,7 +72,7 @@ func main() {

/*
Units - metric
*/
*/
lc := LocalConfig{
/*
This is a main buffer
@@ -82,21 +81,20 @@
This buffer is ready to take MetricsPerSecond*ClientSendInterval. Which gives you the rule, than bigger interval you have or
amount of metric in interval, than more metrics it can take in memory.
*/
conf.MetricsPerSecond*conf.ClientSendInterval,
conf.MetricsPerSecond * conf.ClientSendInterval,
/*
This is a aggr buffer. I assume it make total sense to have maximum buf = PerSecond*Interval.
For example up to 100*60
*/
conf.AggrPerSecond*conf.AggrInterval,
conf.AggrPerSecond * conf.AggrInterval,
/*
Retry file will take only 10 full buffers
*/
conf.MetricsPerSecond*conf.ClientSendInterval*10}

*/
conf.MetricsPerSecond * conf.ClientSendInterval * 10}

if _, err := os.Stat(filepath.Dir(conf.Log)); os.IsNotExist(err) {
if os.MkdirAll(filepath.Dir(conf.Log), os.ModePerm) != nil {
log.Println("Can not create logfile's dir " + filepath.Dir(conf.Log))
log.Println("Can not create logfile's dir ", filepath.Dir(conf.Log))
}
}

@@ -109,7 +107,7 @@ func main() {
/*
Check if directories for temporary files exist
This is especially important when your metricDir is in /tmp
*/
*/
oldUmask := syscall.Umask(0)

if _, err := os.Stat(conf.MetricDir); os.IsNotExist(err) {
@@ -123,7 +121,7 @@
Unfortunately some people write to MetricDir with random permissions
To avoid server crashing and overflowing we need to set ACL on MetricDir, that grafsy is allowed
to read/delete files in there
*/
*/
if conf.UseACL {
ac, err := acl.Parse("user::rw group::rw mask::r other::r")
if err != nil {
@@ -181,4 +179,4 @@ func main() {

wg.Add(1)
wg.Wait()
}
}
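
The LocalConfig literal above only changes whitespace, but the sizing it encodes is easy to restate: the main buffer holds one full send interval of metrics, the aggregation buffer one full aggregation interval, and the retry file up to ten main buffers. A reduced sketch with invented example values (not taken from any real grafsy.toml):

package main

import "fmt"

// Trimmed to the fields used below; the full Config struct is in grafsy.go above.
type sizingConfig struct {
    ClientSendInterval int
    MetricsPerSecond   int
    AggrInterval       int
    AggrPerSecond      int
}

func main() {
    conf := sizingConfig{ClientSendInterval: 10, MetricsPerSecond: 100, AggrInterval: 60, AggrPerSecond: 100}

    mainBufferSize := conf.MetricsPerSecond * conf.ClientSendInterval      // one full send interval in memory
    aggrBufSize := conf.AggrPerSecond * conf.AggrInterval                  // one full aggregation interval
    fileMetricSize := conf.MetricsPerSecond * conf.ClientSendInterval * 10 // retry file caps at 10 full buffers

    fmt.Println(mainBufferSize, aggrBufSize, fileMetricSize) // 1000 6000 10000
}
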
6 changes: 3 additions & 3 deletions metric.go
@@ -1,12 +1,12 @@
package main

import (
"os"
"bufio"
"os"
)

type MetricData struct {
value float64
value float64
amount int64
}

@@ -46,4 +46,4 @@ func getSizeInLinesFromFile(file string) int {
res++
}
return res
}
}
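
Only the tail of getSizeInLinesFromFile is visible in the hunk above. For context, a generic line counter with the same shape might look like the sketch below; this is an assumption about the helper's intent, not the commit's actual body.

package main

import (
    "bufio"
    "fmt"
    "os"
)

// countLines returns the number of lines in a file, or 0 if it cannot be read.
func countLines(file string) int {
    f, err := os.Open(file)
    if err != nil {
        return 0
    }
    defer f.Close()

    res := 0
    scanner := bufio.NewScanner(f)
    for scanner.Scan() {
        res++
    }
    return res
}

func main() {
    fmt.Println(countLines("/etc/hostname"))
}
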
44 changes: 22 additions & 22 deletions monitoring.go
@@ -1,35 +1,35 @@
package main

import (
"strconv"
"log"
"os"
"strconv"
"strings"
"time"
"log"
)

type Monitoring struct {
conf Config
got Source
saved int
sent int
conf Config
got Source
saved int
sent int
dropped int
invalid int
lg log.Logger
ch chan string
lg log.Logger
ch chan string
}
type Source struct {
net int
dir int
net int
dir int
retry int
}

const monitorMetrics = 7
const monitorMetrics = 7

func (m *Monitoring) generateOwnMonitoring(){
func (m *Monitoring) generateOwnMonitoring() {

now := strconv.FormatInt(time.Now().Unix(),10)
hostname,_ := os.Hostname()
now := strconv.FormatInt(time.Now().Unix(), 10)
hostname, _ := os.Hostname()
path := strings.Replace(m.conf.MonitoringPath, "HOSTNAME", strings.Replace(hostname, ".", "_", -1), -1) + ".grafsy."

// If you add a new one - please increase monitorMetrics
@@ -45,29 +45,29 @@ func (m *Monitoring) generateOwnMonitoring(){

for _, metric := range monitor_slice {
select {
case m.ch <- metric:
default:
m.lg.Printf("Too many metrics in the MON queue! This is very bad")
m.dropped++
case m.ch <- metric:
default:
m.lg.Printf("Too many metrics in the MON queue! This is very bad")
m.dropped++
}
}

}

func (m *Monitoring) clean(){
func (m *Monitoring) clean() {
m.saved = 0
m.sent = 0
m.dropped = 0
m.invalid = 0
m.got = Source{0,0,0}
m.got = Source{0, 0, 0}
}

func (m *Monitoring) runMonitoring() {
for ;; time.Sleep(60*time.Second) {
for ; ; time.Sleep(60 * time.Second) {
m.generateOwnMonitoring()
if m.dropped != 0 {
m.lg.Printf("Too many metrics in the main buffer. Had to drop incommings")
}
m.clean()
}
}
}
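
The select/default in generateOwnMonitoring is the standard non-blocking send: when the buffered channel is full, the metric is dropped and counted instead of stalling the monitoring loop. A standalone illustration (not grafsy code):

package main

import (
    "fmt"
    "time"
)

func main() {
    // A tiny buffer stands in for the main metric channel.
    ch := make(chan string, 2)
    dropped := 0

    for i := 0; i < 4; i++ {
        metric := fmt.Sprintf("example.metric %d %d", i, time.Now().Unix())
        select {
        case ch <- metric: // room in the buffer: metric is queued
        default: // buffer full: drop and count, never block
            dropped++
        }
    }

    fmt.Println("queued:", len(ch), "dropped:", dropped) // queued: 2 dropped: 2
}
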