Commit

improvements but still does not work right
Matthew Hollick committed Sep 16, 2019
1 parent dc2ed3b commit 197c805
Showing 11 changed files with 336 additions and 42 deletions.
1 change: 1 addition & 0 deletions .gitignore
@@ -0,0 +1 @@
/data
7 changes: 7 additions & 0 deletions Dockerfiles/collectd/Dockerfile
@@ -0,0 +1,7 @@
FROM ubuntu

RUN apt-get update \
&& apt-get -y upgrade \
&& apt-get install -y collectd

CMD collectd -f
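
The docker-compose.yml change below refers to this image simply as "collectd" with no build: key, so the image presumably has to be built and tagged by hand before bringing the stack up; a minimal sketch, assuming the Dockerfile path shown above:

# Build the collectd image locally and tag it "collectd" to match docker-compose.yml.
docker build -t collectd Dockerfiles/collectd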
74 changes: 74 additions & 0 deletions carbon-clickhouse.conf
@@ -0,0 +1,74 @@
[common]
# Prefix for storing all internal carbon-clickhouse graphs. Supported macros: {host}
metric-prefix = "carbon.agents.{host}"
# Endpoint for storing internal carbon metrics. Valid values: "" or "local", "tcp://host:port", "udp://host:port"
metric-endpoint = "local"
# Interval at which internal metrics are stored, like CARBON_METRIC_INTERVAL
metric-interval = "1m0s"
# GOMAXPROCS
max-cpu = 1

[logging]
# "stderr", "stdout" can be used as file name
file = "stdout"
# Log level. Valid values: "debug", "info", "warn", "error"
level = "warn"

[data]
# Folder for buffering received data
path = "/data/carbon-clickhouse/"
# Interval for rotating (and uploading) the buffer file.
# Minimize chunk-interval to minimize the lag between receiving and storing a point
chunk-interval = "1s"
# Automatically increase the chunk interval if the number of unprocessed files grows.
# Example: set the chunk interval to 10s if the unhandled file count reaches 5, and to 60s if it reaches 20:
# chunk-auto-interval = "5:10s,20:60s"
chunk-auto-interval = "5:5s,10:60s"

[upload.graphite_reverse]
type = "points-reverse"
table = "graphite_reverse"
threads = 1
url = "http://clickhouse:8123/"
timeout = "1m0s"

[upload.graphite_index]
type = "index"
table = "graphite_index"
threads = 1
url = "http://clickhouse:8123/"
timeout = "1m0s"
cache-ttl = "12h0m0s"

[upload.graphite_tagged]
type = "tagged"
table = "graphite_tagged"
threads = 1
url = "http://clickhouse:8123/"
timeout = "1m0s"
cache-ttl = "12h0m0s"

[udp]
listen = ":2003"
enabled = true

[tcp]
listen = ":2003"
enabled = true

[pickle]
listen = ":2004"
enabled = true

[prometheus]
listen = ":2006"
enabled = true

# https://github.com/lomik/carbon-clickhouse/blob/master/grpc/carbon.proto
[grpc]
listen = ":2005"
enabled = false

[pprof]
listen = "localhost:7007"
enabled = false
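
With the TCP listener on :2003 published by the compose file, ingestion can be smoke-tested by pushing a single Graphite plaintext datapoint at carbon-clickhouse; a minimal sketch (test.sanity is an arbitrary example name, and nc flag support varies between netcat implementations):

# Send one plaintext datapoint ("name value timestamp") to the TCP listener.
echo "test.sanity 42 $(date +%s)" | nc -w1 localhost 2003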
6 changes: 6 additions & 0 deletions client.sh
@@ -0,0 +1,6 @@
#!/bin/sh

docker-compose exec clickhouse bash -c "
export HOME=/var/lib/clickhouse/
exec clickhouse client
"
94 changes: 94 additions & 0 deletions collectd.conf
@@ -0,0 +1,94 @@
#
# Config file for collectd(1).
# Please read collectd.conf(5) for a list of options.
# http://collectd.org/
#

##############################################################################
# Global #
#----------------------------------------------------------------------------#
# Global settings for the daemon. #
##############################################################################

FQDNLookup false

#----------------------------------------------------------------------------#
# When enabled, plugins are loaded automatically with the default options #
# when an appropriate <Plugin ...> block is encountered. #
# Disabled by default. #
#----------------------------------------------------------------------------#
AutoLoadPlugin true

#----------------------------------------------------------------------------#
# When enabled, internal statistics are collected, using "collectd" as the #
# plugin name. #
# Disabled by default. #
#----------------------------------------------------------------------------#
CollectInternalStats true

#----------------------------------------------------------------------------#
# Interval at which to query values. This may be overridden on a per-plugin  #
# basis by using the 'Interval' option of the LoadPlugin block:              #
# <LoadPlugin foo> #
# Interval 60 #
# </LoadPlugin> #
#----------------------------------------------------------------------------#
Interval 60

# Limit the size of the write queue. Default is no limit. Setting up a limit is
# recommended for servers handling a high volume of traffic.
WriteQueueLimitHigh 1000000
WriteQueueLimitLow 800000

##############################################################################
# Logging #
#----------------------------------------------------------------------------#
# Plugins which provide logging functions should be loaded first, so log #
# messages generated when loading or configuring other plugins can be #
# accessed. #
##############################################################################

LoadPlugin logfile
<Plugin logfile>
LogLevel err
File stdout
</Plugin>

##############################################################################
# LoadPlugin section #
#----------------------------------------------------------------------------#
# Lines beginning with a single `#' belong to plugins which have been built #
# but are disabled by default. #
# #
# Lines beginning with `##' belong to plugins which have not been built due #
# to missing dependencies or because they have been deactivated explicitly. #
##############################################################################

LoadPlugin cpu
LoadPlugin memory
LoadPlugin uptime

##############################################################################
# Plugin configuration #
#----------------------------------------------------------------------------#
# In this section configuration stubs for each plugin are provided. A desc- #
# ription of those options is available in the collectd.conf(5) manual page. #
##############################################################################

<Plugin cpu>
ReportByCpu true
ReportByState true
ValuesPercentage true
</Plugin>

<Plugin memory>
ValuesAbsolute true
ValuesPercentage true
</Plugin>

<Plugin "write_kafka">
Property "metadata.broker.list" "kafka:9092"
<Topic "collectd">
Format JSON
</Topic>
</Plugin>
57 changes: 32 additions & 25 deletions docker-compose.yml
@@ -39,17 +39,13 @@ services:
- elasticsearch

clickhouse:
image: yandex/clickhouse-server
image: yandex/clickhouse-server:19.6.2.11
hostname: clickhouse
ports:
- 8123:8123
- 9000:9000
- 9009:9009
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
volumes:
- "./rollup.xml:/etc/clickhouse-server/config.d/rollup.xml"
- "./init.sql:/docker-entrypoint-initdb.d/init.sql"
- "./data/clickhouse/data:/var/lib/clickhouse/data"
- "./data/clickhouse/metadata:/var/lib/clickhouse/metadata"
depends_on:
- zookeeper

@@ -78,27 +74,30 @@ services:
depends_on:
- elasticsearch

graphite-clickhouse:
image: mosquito/graphite-clickhouse
hostname: graphite-clickhouse
environment:
CLICKHOUSE_URL: http://clickhouse:8123
carbon-clickhouse:
image: lomik/carbon-clickhouse:v0.10.2
hostname: carbon-clickhouse
volumes:
- "./data/carbon-clickhouse:/data/carbon-clickhouse"
- "./carbon-clickhouse.conf:/etc/carbon-clickhouse/carbon-clickhouse.conf"
ports:
- 9090:9090
- "2003:2003" # plain tcp
- "2004:2004" # pickle
- "2006:2006" # prometheus remote write
depends_on:
- clickhouse

carbon-clickhouse:
image: mosquito/carbon-clickhouse
hostname: carbon-clickhouse
environment:
CLICKHOUSE_URL: http://clickhouse:8123
graphite-clickhouse:
image: lomik/graphite-clickhouse:v0.11.1
hostname: graphite-clickhouse
volumes:
- "./rollup.xml:/etc/graphite-clickhouse/rollup.xml"
- "./graphite-clickhouse.conf:/etc/graphite-clickhouse/graphite-clickhouse.conf"
ports:
- 2003:2003
- 2004:2004
- 2005:2005
- 9090:9090
depends_on:
- clickhouse
- carbon-clickhouse

carbonapi:
image: mosquito/carbonapi
@@ -138,4 +137,12 @@ services:
image: docker.elastic.co/logstash/logstash:7.3.1
hostname: logstash-post
depends_on:
- elasticsearch

collectd:
image: collectd
hostname: collectd
volumes:
- ./collectd.conf:/etc/collectd/collectd.conf
depends_on:
- kafka
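
A plausible bring-up order, assuming the collectd image has already been built and tagged as sketched above:

docker-compose up -d
# Tail both halves of the metrics path to see where points stop flowing:
docker-compose logs -f carbon-clickhouse graphite-clickhouse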
19 changes: 19 additions & 0 deletions graphite-clickhouse.conf
@@ -0,0 +1,19 @@
[common]
listen = ":9090"
max-cpu = 8

[clickhouse]
url = "http://clickhouse:8123/?max_query_size=2097152&readonly=2"
index-table = "graphite_index"
data-timeout = "1m0s"
index-timeout = "1m0s"
tagged-table = "graphite_tagged"

[[data-table]]
table = "graphite_reverse"
reverse = true
rollup-conf = "/etc/graphite-clickhouse/rollup.xml"

[logging]
file = "stdout"
level = "info"
28 changes: 28 additions & 0 deletions init.sql
@@ -0,0 +1,28 @@
CREATE TABLE IF NOT EXISTS default.graphite_reverse (
Path String,
Value Float64,
Time UInt32,
Date Date,
Timestamp UInt32
) ENGINE = GraphiteMergeTree('graphite_rollup')
PARTITION BY toYYYYMM(Date)
ORDER BY (Path, Time);

CREATE TABLE IF NOT EXISTS default.graphite_index (
Date Date,
Level UInt32,
Path String,
Version UInt32
) ENGINE = ReplacingMergeTree(Version)
PARTITION BY toYYYYMM(Date)
ORDER BY (Level, Path, Date);

CREATE TABLE IF NOT EXISTS default.graphite_tagged (
Date Date,
Tag1 String,
Path String,
Tags Array(String),
Version UInt32
) ENGINE = ReplacingMergeTree(Version)
PARTITION BY toYYYYMM(Date)
ORDER BY (Tag1, Path, Date);
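
Once a test point has been sent, the raw table can be checked straight from ClickHouse. Note that the points-reverse uploader stores each metric path with its components reversed (e.g. test.sanity arrives as sanity.test), which is what reverse = true in graphite-clickhouse.conf accounts for:

# Inspect the most recent points in the reversed-path table.
docker-compose exec -T clickhouse clickhouse client \
  --query "SELECT Path, Value, Time FROM default.graphite_reverse ORDER BY Time DESC LIMIT 5"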
23 changes: 6 additions & 17 deletions riemann.config
@@ -24,24 +24,13 @@
(expired
(fn [event] (info "expired" event))))))

(def elastic
(elasticsearch {:es-endpoint "http://elasticsearch:9200"
:es-index "riemann"
:index-suffix "-yyyy.MM.dd"
:type "event"}
(fn [event]
(merge event {}))))

(def graph
(graphite {:host "carbon-clickhouse"}))

(kafka-consumer {:consumer.config {:bootstrap.servers "kafka:9092"
:group.id "riemann"}
:topics ["program-logs"]})
:group.id "collectd"}
:topics ["collectd"]})

(streams
elastic)

;(defn- transmit
; [writer socket message]
; (let [bytes-out (bytes (byte-array (map byte message)))]
; (.write writer bytes-out 0 (count bytes-out))
; (.flush writer)))

graph)
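
With this change riemann consumes the collectd topic (group.id "collectd") and forwards events to carbon-clickhouse through the graphite output defined above; if metrics still go missing, the consumer side can be checked from the container logs, assuming the compose service is named riemann:

# Watch riemann for kafka-consumer or graphite connection errors.
docker-compose logs -f riemann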