forked from signalfx/splunk-otel-collector
-
Notifications
You must be signed in to change notification settings - Fork 0
/
metadata.yaml
64 lines (64 loc) · 1.88 KB
/
metadata.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
---
# mdatagen metadata for the Databricks receiver.
# Declares the attributes and metrics this receiver emits; the collector's
# metadata generator (mdatagen) turns this into generated metric-builder code.
name: databricksreceiver

# Attributes attached to the emitted metric data points.
attributes:
  databricks.instance.name:
    description: The name of the Databricks instance as defined by the value of the "instance_name" field in the config
  job_id:
    description: The numeric ID of the Databricks job
  task_id:
    # NOTE(review): key says "id" but the description says "name" — matches
    # upstream as scraped; confirm against the receiver implementation.
    description: The name of the Databricks task
  task_type:
    description: The type of the Databricks task
    # Closed set of task types reported by the Databricks Jobs API.
    enum:
      - NotebookTask
      - SparkJarTask
      - SparkPythonTask
      - PipelineTask
      - PythonWheelTask
      - SparkSubmitTask

# All metrics are point-in-time gauges sampled once per scrape.
metrics:
  databricks.jobs.total:
    enabled: true
    description: A snapshot of the total number of jobs registered in the Databricks instance taken at each scrape
    unit: "{jobs}"
    gauge:
      value_type: int
  databricks.jobs.schedule.status:
    enabled: true
    description: A snapshot of the pause/run status per job taken at each scrape
    extended_documentation: 0=PAUSED, 1=UNPAUSED, 2=NOT_SCHEDULED
    unit: "{status}"
    gauge:
      value_type: int
    attributes:
      - job_id
  databricks.tasks.schedule.status:
    enabled: true
    description: A snapshot of the pause/run status per task taken at each scrape
    extended_documentation: 0=PAUSED, 1=UNPAUSED, 2=NOT_SCHEDULED
    unit: "{status}"
    gauge:
      value_type: int
    attributes:
      - job_id
      - task_id
      - task_type
  databricks.jobs.active.total:
    enabled: true
    description: A snapshot of the number of active jobs taken at each scrape
    unit: "{jobs}"
    gauge:
      value_type: int
  databricks.jobs.run.duration:
    enabled: true
    description: The execution duration in milliseconds per completed job
    unit: ms
    gauge:
      value_type: int
    attributes:
      - job_id
  databricks.tasks.run.duration:
    enabled: true
    description: The execution duration in milliseconds per completed task
    unit: ms
    gauge:
      value_type: int
    attributes:
      - job_id
      - task_id