diff --git a/antora.yml b/antora.yml index 8b5b1d8195..df496b588f 100644 --- a/antora.yml +++ b/antora.yml @@ -18,6 +18,10 @@ asciidoc: default_meta_keywords: tinymce, documentation, docs, plugins, customizable skins, configuration, examples, html, php, java, javascript, image editor, inline editor, distraction-free editor, classic editor, wysiwyg # product docker variables dockerimageimportfromwordexporttoword: registry.containers.tiny.cloud/docx-converter-tiny + dockerimageexporttopdf: registry.containers.tiny.cloud/pdf-converter-tiny + dockerimageexporttopdfwindows: registry.containers.tiny.cloud/pdf-converter-windows-tiny + # document converter placeholder variables + exportpdf_service_url: exportpdf_service_url placeholder # product variables productname: TinyMCE productmajorversion: 7 diff --git a/modules/ROOT/pages/individual-export-to-pdf-on-premises.adoc b/modules/ROOT/pages/individual-export-to-pdf-on-premises.adoc index d8e44101f3..40f8f4f3d1 100644 --- a/modules/ROOT/pages/individual-export-to-pdf-on-premises.adoc +++ b/modules/ROOT/pages/individual-export-to-pdf-on-premises.adoc @@ -2,4 +2,20 @@ :navtitle: Export to PDF :description: Setting up Export to PDF using Docker. 
:keywords: server-side, docker, export-to-pdf, on-premises -:pluginname: Export to PDF \ No newline at end of file +:pluginname: Export to PDF + +include::partial$individually-licensed-components/export-to-pdf/export-to-pdf-overview.adoc[] + +include::partial$individually-licensed-components/export-to-pdf/export-to-pdf-requirements.adoc[] + +include::partial$individually-licensed-components/export-to-pdf/export-to-pdf-installation.adoc[] + +include::partial$individually-licensed-components/export-to-pdf/export-to-pdf-fonts.adoc[] + +include::partial$individually-licensed-components/export-to-pdf/export-to-pdf-autorization.adoc[] + +include::partial$individually-licensed-components/export-to-pdf/export-to-pdf-api-usage.adoc[] + +include::partial$individually-licensed-components/export-to-pdf/export-to-pdf-ssl-communication.adoc[] + +include::partial$individually-licensed-components/export-to-pdf/export-to-pdf-logs.adoc[] \ No newline at end of file diff --git a/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-api-usage.adoc b/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-api-usage.adoc new file mode 100644 index 0000000000..6f7b93f9e4 --- /dev/null +++ b/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-api-usage.adoc @@ -0,0 +1,42 @@ +[[api-usage]] +== API Usage + +The {pluginname} On-Premises converter provides the ability to convert an HTML document to a PDF file via Restful API. + +The API is available on `+http://localhost:[port]+` (by default the `port` is `8080`). + +[NOTE] +The REST API documentation is available at `+http://localhost:[port]/docs+`. +Alternatively, refer to the specifications in link:https://exportpdf.converter.tiny.cloud/docs[https://exportpdf.converter.tiny.cloud/docs^]. + +If the authorization for the API is enabled, provided an authorization token. 
More instructions can be found in the xref:individual-export-to-pdf-on-premises.adoc#authorization[authorization] section. + +=== Using additional HTTP headers + +If fetching some resources (e.g. images) used in a generated PDF requires passing an additional authorization factor in the form of additional HTTP headers: + +. It can be defined on the application startup by setting `EXTRA_HTTP_HEADERS` environmental variable where the value is a stringified JSON object with required headers. +. It can be defined in a request sent to the PDF Converter API in `options`: + +[source, js, subs="attributes+"] +---- +const data = { + html: '
<p>I am a teapot</p>
', + css: 'p { color: red; }', + options: { + extra_http_headers: { + authorization: 'BearerI am a teapot
", + css: "p { color: red; }", +}; + +const config = { + headers: { + 'Authorization': token + }, + responseType: 'arraybuffer', +}; + +axios.post( 'http://localhost:8080/v1/convert', data, config ) + .then( response => { + fs.writeFileSync('./file.pdf', response.data, 'binary'); + }).catch( error => { + console.log( error ); + }); +---- + +`SECRET_KEY` it’s the key which has been passed to the {pluginname} On-Premises instance + +Please refer to the link:https://exportpdf.converter.tiny.cloud/docs[{pluginname} REST API documentation] to start using the service. + +[NOTE] +If API clients like Postman or Insomnia are used, then set the JWT token as an `Authorization` header in the `Headers` tab. Do not use the built-in token authorization as this will generate invalid header with a `Bearer` prefix added to the token. \ No newline at end of file diff --git a/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-fonts.adoc b/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-fonts.adoc new file mode 100644 index 0000000000..f950c20157 --- /dev/null +++ b/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-fonts.adoc @@ -0,0 +1,65 @@ +[[fonts]] +== Fonts + +During document writing, the possibility of using many different fonts can be very important to users. + +Using the appropriate font can change the appearance of the document and emphasize its style. + +{pluginname} Converter allows link:https://exportpdf.converter.tiny.cloud/docs#section/Web-Fonts[Web Fonts^] to be used, which provided the integrator with the ability to use standard operating system fonts or use custom fonts without the need to import them using CSS. 
+ +Below is a list of the basic fonts included in the image: + +[source] +---- +OpenSans-Bold.ttf +OpenSans-BoldItalic.ttf +OpenSans-ExtraBold.ttf +OpenSans-ExtraBoldItalic.ttf +OpenSans-Italic.ttf +OpenSans-Light.ttf +OpenSans-LightItalic.ttf +OpenSans-Regular.ttf +OpenSans-Semibold.ttf +OpenSans-SemiboldItalic.ttf +---- + +However, additional fonts can be added to {pluginname} Converter in two ways: + +* Use Unix-like PDF-Converter image `{dockerimageexporttopdf}` and mount fonts directory to it. +** See xref:individual-export-to-pdf-on-premises.adoc#add-custom-fonts-to-pdf-converter[Add custom fonts to PDF Converter] section. +* Use Windows PDF-Converter image `{dockerimageexporttopdf}` and mount to it fonts directory from the Windows operating system on which the container is running. +** See Use Windows fonts in PDF Converter section. + +[NOTE] +The fonts inside the mounted volume will be installed on the docker image operating system. Only the `.ttf` and `.otf` font formats are supported. If other font formats are used, these will need to be converted to the supported format prior or use fonts such as link:https://exportpdf.converter.tiny.cloud/docs#section/Web-Fonts[Web Fonts^]. + +[TIP] +Ensure that the converted fonts can be installed and used on your local machine first, before installing them on the docker container. + +[[add-custom-fonts-to-pdf-converter]] +=== Add custom fonts to PDF Converter + +If custom fonts are being used in PDF files, use the `pdf-converter-tiny` Docker image and mount the directory with the custom fonts for the PDF Converter application running on a machine with a Unix-like system (this includes Docker on Windows with a WSL backend). + +The `{dockerimageexporttopdf}` Docker image need to be run on a Unix-like operating system and mount the `~/your_fonts_dir:/usr/share/fonts/your_fonts_dir` volume. 
+ +Launch the Docker container on Unix-like operating system example: + +[source, bash, subs="attributes+"] +---- +docker run --init -v ~/your_fonts_dir:/usr/share/fonts/your_fonts_dir -p 8080:8080 -e LICENSE_KEY=[your_license_key] {dockerimageexporttopdf}:[version] +---- + +[[use-windows-fonts-in-pdf-converter]] +=== Use Windows fonts in PDF Converter + +If using Windows fonts like Arial, Verdana, etc. in PDF files, use `pdf-converter-windows-tiny` Docker image that allows you to run the application on a machine with Windows operating system and mount fonts from the system. + +You just need to run `{dockerimageexporttopdf}` Docker image on Windows operating system and mount `C:\Windows\Fonts:C:\Windows\Fonts` volume. + +Launch the Docker container on Windows operating system example: + +[source, bash, subs="attributes+"] +---- +docker run -v C:\Windows\Fonts:C:\Windows\Fonts -p 8080:8080 --env LICENSE_KEY=[your_license_key] {dockerimageexporttopdfwindows}:[version] +---- \ No newline at end of file diff --git a/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-installation.adoc b/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-installation.adoc new file mode 100644 index 0000000000..45195b0e28 --- /dev/null +++ b/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-installation.adoc @@ -0,0 +1,98 @@ +[[installation]] +== Installation + +[NOTE] +A valid license key is needed in order to install {pluginname} On-Premises. +link:https://www.tiny.cloud/contact/[Contact us] for a trial license key. + +=== Supported technologies + +The application is provided as a docker image by default. + +It can be run with any Open Container runtime tool e.g. link:https://kubernetes.io/[Kubernetes], link:https://www.redhat.com/en/technologies/cloud-computing/openshift[OpenShift], link:https://podman.io/[Podman], link:https://docs.docker.com/[Docker] and many others. 
+ +Refer to the xref:individual-export-to-pdf-on-premises.adoc#requirements[Requirements guide] for more information about the hardware and software requirements to run the {pluginname} On-Premises. + +=== Setting up the application using a Docker container + +. The username and password credentials supplied by Tiny are utilized for logging into the Docker registry and retrieving the Docker image. +. Containerize the application using `docker` or `docker-compose`. +. Use a demo page to verify if the application works properly. + +==== Containerize example using docker + +Login to Docker registry: + +[source, sh, subs="attributes+"] +---- +docker login -u [username] -p [password] registry.containers.tiny.cloud +---- + +Launch the Docker container: + +[source, sh, subs="attributes+"] +---- +docker run --init -p 8080:8080 -e LICENSE_KEY=[your_license_key] {dockerimageexporttopdf}:[version] +---- + +If using authorization provide the SECRET_KEY: + +[source, sh, subs="attributes+"] +---- +docker run --init -p 8080:8080 -e LICENSE_KEY=[your_license_key] -e SECRET_KEY=[your_secret_key] {dockerimageexporttopdf}:[version] +---- + +Read more about using authorization in the xref:individual-export-to-pdf-on-premises.adoc#authorization[authorization] section. + +==== Containerize example using docker-compose + +. Create the docker-compose.yml file: ++ +[source, yml, subs="attributes+"] +---- +version: "3.8" +services: + pdf-converter-tiny: + image: {dockerimageexporttopdf}:[version] + ports: + - "8080:8080" + restart: always + init: true + environment: + LICENSE_KEY: "license_key" + # Secret Key is optional + SECRET_KEY: "secret_key" + # Custom request origin is optional + CUSTOM_REQUEST_ORIGIN: "https://your_custom_origin" +---- ++ +For details on `SECRET_KEY` usage check the xref:individual-export-to-pdf-on-premises.adoc#authorization[authorization] section. ++ +. 
Run: + +[source, bash] +---- +docker-compose up +---- + +[NOTE] +==== +* Without a correct `LICENSE_KEY` the application will not start. +** If the license is invalid, a wrong license key error will display in the logs and the application will not run. +* It is advisable to override the SECRET_KEY variable using a unique and hard to guess string for security reasons. +* If the specific infrastructure has strict CORS enabled, then use the `CUSTOM_REQUEST_ORIGIN` variable to set the origin of requests made by the converter. The default value is `https://pdf-internal`. +==== + +=== Windows fonts support + +If using Windows fonts like Calibri, Verdana, etc. in PDF files, use the `pdf-converter-windows-tiny` Docker image and run it on a Windows operating system. + +See xref:individual-export-to-pdf-on-premises.adoc#fonts[Fonts] section for more details. + +=== Next steps + +Use the link:http://localhost:8080/v1/convert[http://localhost:8080/v1/convert] endpoint to export PDF files. Check out the xref:individual-export-to-pdf-on-premises.adoc#authorization[authorization] section to learn more about tokens and token endpoints. + +Use the demo page available on link:http://localhost:8080/demo[http://localhost:8080/demo] to generate an example PDF file. + +Refer to the {pluginname} REST API documentation on link:http://localhost:8080/docs[http://localhost:8080/docs] for more details. \ No newline at end of file diff --git a/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-logs.adoc b/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-logs.adoc new file mode 100644 index 0000000000..6a23a39e6d --- /dev/null +++ b/modules/ROOT/partials/individually-licensed-components/export-to-pdf/export-to-pdf-logs.adoc @@ -0,0 +1,212 @@ +[[logs]] +== Logs + +The logs from {pluginname} On-Premises are written to `stdout` and `stderr`. Most of them are formatted in JSON. They can be used for monitoring or debugging purposes. 
In production environments, It is recommend storing the logs to files or using a distributed logging system (like ELK or CloudWatch). + +=== Monitoring {pluginname} with logs + +To get more insight into how the {pluginname} On-Premises is performing, logs can be used for monitoring. To enable these, add the `ENABLE_METRIC_LOGS=true` environment variable. + +=== Log structure + +The log structure contains the following information: + +* `handler`: A unified identifier of action. Use this field to identify calls. +* `traceId`: A unique RPC call ID. +* `tags`: A semicolon-separated list of tags. Use this field to filter metrics logs. +* `data`: An object containing additional information. It might vary between different transports. +* `data.duration`: The request duration in milliseconds. +* `data.transport`: The type of the request transport. It could be http or ws (websocket). +* `data.status`: The request status. It can be equal to success, fail, warning. +* `data.statusCode`: The response status in HTTP status code standard. + +Additionally, for the HTTP transport, the following information is included: + +* `data.url`: The URL path. +* `data.method`: The request method. + +In case of an error, `data.status` will be equal to failed and `data.message` will contain the error message. + +An example log for HTTP transport: + +[source] +---- +{ + "level": 30, + "time": "2021-03-09T11:15:09.154Z", + "msg": "Request summary", + "handler": "convertHtmlToPdf", + "traceId": "85f13d92-57df-4b3b-98bb-0ca41a5ae601", + "data": { + "duration": 2470, + "transport": "http", + "statusCode": 200, + "status": "success", + "url": "/v1/convert", + "method": "POST" + }, + "tags": "metrics" +} +---- +// verify if this is something we will add. +//// +See example charts to check how to use logs for monitoring purposes. +//// + +=== Docker + +The docker has built-in logging mechanisms that capture logs from the output of the containers. The default logging driver writes the logs to files. 
+ +When using this driver, use the `docker logs` command to show logs from a container. The `-f` flag can be added to view logs in real time. Refer to the link:https://docs.docker.com/engine/reference/commandline/logs/[official Docker documentation^] for more information about the logs command. + +[NOTE] +When a container is running for a long period of time, the logs can take up a lot of space. To avoid this problem, make sure that the log rotation is enabled. This can be set with the `max-size` option. + +=== Distributed logging + +If running more than one instance of {pluginname} On-Premises, It is recommend using a distributed logging system. It allows for viewing and analyzing logs from all instances in one place. + +==== AWS CloudWatch and other cloud solutions + +If running {pluginname} On-Premises in the cloud, the simplest and recommended way is to use a service that is available at the selected provider. + +Here are some of the available services: + +* AWS: link:https://aws.amazon.com/CloudWatch[CloudWatch^] +* Google Cloud: link:https://cloud.google.com/logging[Cloud Logging^] +* Azure: link:https://azure.microsoft.com/en-us/services/monitor/[Azure Monitor^] + +To use CloudWatch with AWS ECS, a log group must be created before, and the log driver must be changed to `awslogs`. When the log driver is configured properly, logs will be streamed directly to CloudWatch. + +The `logConfiguration` may look similar to this: + +[source, json] +---- +"logConfiguration": { + "logDriver": "awslogs", + "options": { + "awslogs-region": "us-west-2", + "awslogs-group": "tinysource", + "awslogs-stream-prefix": "tiny-pdf-converter-logs" + } +} +---- + +Refer to the link:https://docs.aws.amazon.com/AmazonECS/latest/developerguide/using_awslogs.html[Using the awslogs Log Driver] article for more information. 
+ +=== On-Premises solutions + +If using a specific infrastructure such as your own or for some reason cannot use the service offered by a provider, some on-premises distributed logging system can be used. + +There are a lot of solutions available, including: + +* link:https://www.elastic.co/what-is/elk-stack[ELK^] + link:https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-getting-started.html[Filebeat^] + +This is a stack built on top of Elasticsearch, Logstash and Kibana. In this configuration, Elasticsearch stores logs, Filebeat reads logs from Docker and sends them to Elasticsearch, and Kibana is used to view them. Logstash is not necessary because logs are already structured. + +* link:https://www.fluentd.org/[Fluentd^] + +It uses a dedicated link:https://docs.docker.com/config/containers/logging/fluentd[Docker log driver^] to send the logs. It has a built-in frontend, but can also be integrated with Elasticsearch and Kibana for better filtering. + +* link:https://www.graylog.org/[Graylog^] + +It uses a dedicated link:https://docs.docker.com/config/containers/logging/gelf[Docker^] log driver to send the logs. It has a built-in frontend and needs Elasticsearch to store the logs as well as a MongoDB database to store the configuration. + +==== Example configuration + +The example configuration uses Fluentd, Elasticsearch, and Kibana to capture logs from Docker. + +Before running {pluginname} On-Premises, prepare the logging services. For the purposes of this example, Docker Compose is used. 
Create the fluentd, elasticsearch and kibana services inside the docker-compose.yml file: + +[source, yaml] +---- +version: '3.7' +services: + fluentd: + build: ./fluentd + volumes: + - ./fluentd/fluent.conf:/fluentd/etc/fluent.conf + ports: + - "24224:24224" + - "24224:24224/udp" + + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:6.8.5 + expose: + - 9200 + ports: + - "9200:9200" + + kibana: + image: docker.elastic.co/kibana/kibana:6.8.5 + environment: + ELASTICSEARCH_HOSTS: "http://elasticsearch:9200" + ports: + - "5601:5601" +---- + +To integrate Fluentd with Elasticsearch, first install `fluent-plugin-elasticsearch` in the Fluentd image. To do this, create a `fluentd/Dockerfile` with the following content: + +[source, dockerfile] +---- +FROM fluent/fluentd:v1.10-1 + +USER root + +RUN apk add --no-cache --update build-base ruby-dev \ + && gem install fluent-plugin-elasticsearch \ + && gem sources --clear-all +---- + +Next, configure the input server and connection to Elasticsearch in the `fluentd/fluent.conf` file: + +[source, xml] +---- + +