# book.toml
[book]
authors = ["PermaplanT Contributors"]
language = "en"
multilingual = false
src = "doc"
title = "PermaplanT"

[preprocessor.generate-summary]
get_chapter_name_from_file = true

[preprocessor.mermaid]
command = "mdbook-mermaid"

[output.html]
additional-js = ["doc/mermaid.min.js", "doc/mermaid-init.js"]

[output.html.fold]
enable = true # whether or not to enable section folding
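# A hedged note: mdBook's fold output also documents a `level` key (the depth
# at which sections start folded; 0 folds everything). Shown commented out as
# an illustrative sketch, not part of this configuration:
# level = 0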

[output.linkcheck]
# Should we check links on the internet? Enabling this option incurs a
# non-negligible performance cost.
follow-web-links = false
# Are we allowed to link to files outside of the book's root directory?
# Leaving this disabled may help prevent linking to sensitive files
# (e.g. "../../../../etc/shadow").
traverse-parent-directories = false
# If necessary, you can exclude one or more links from being checked with a
# list of regular expressions. The regex will be applied to the link href (i.e.
# the `./index.html` in `[some page](./index.html)`) so it can be used to
# ignore both web and filesystem links.
#
# Hint: you can use TOML's raw strings (single quote) to avoid needing to
# escape things twice.
exclude = []
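# For illustration only (a hypothetical pattern, not part of this config),
# a raw-string regex that would skip every link under example.com:
# exclude = ['^https?://example\.com/']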
# The User-Agent to use when sending web requests
user-agent = "mdbook-linkcheck-0.4.0"
# The number of seconds a cached result is valid for (12 hrs by default)
cache-timeout = 43200
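# (43200 = 12 * 60 * 60; e.g. a 24-hour cache would be 86400.)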
# How should warnings be treated?
#
# - "warn" will emit warning messages
# - "error" treats all warnings as errors, failing the linkcheck
# - "ignore" will ignore warnings, suppressing diagnostic messages and allowing
#   the linkcheck to continue
warning-policy = "error"
# Extra HTTP headers that must be sent to certain websites
# in order for the link check to succeed.
#
# This is a dictionary (map), with keys being regexes
# matching a set of web sites, and values being an array of
# the headers.
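#
# A sketch of the shape this takes, following the crates.io example from the
# mdbook-linkcheck documentation (commented out, illustrative only):
# [output.linkcheck.http-headers]
# 'crates\.io' = ["Accept: text/html"]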