diff --git a/repo/packages/C/confluent-connect/7/config.json b/repo/packages/C/confluent-connect/7/config.json new file mode 100644 index 000000000..2a14d875b --- /dev/null +++ b/repo/packages/C/confluent-connect/7/config.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/schema#", + "properties": { + "connect": { + "properties": { + "name": { + "default": "connect", + "description": "Service name for the connect worker application(s)", + "type": "string" + }, + "instances": { + "default": 1, + "description": "Number of instances to run.", + "minimum": 1, + "type": "integer" + }, + "cpus": { + "default": 2, + "description": "CPU shares to allocate to each connect worker instance.", + "minimum": 1, + "type": "number" + }, + "mem": { + "default": 1024, + "description": "Memory (MB) to allocate to each connect worker instance.", + "minimum": 512, + "type": "number" + }, + "heap": { + "default": 768, + "description": "JVM heap allocation (in MB) for the connect worker task; should be ~256MB less than the total memory for the instance.", + "minimum": 256, + "type": "number" + }, + "role": { + "default": "*", + "description": "Deploy connect workers only on nodes with this role.", + "type": "string" + }, + "kafka-service": { + "default": "confluent-kafka", + "description": "Target Apache Kafka by Confluent service to which these tasks will connect.", + "type": "string" + }, + "zookeeper-connect": { + "default": "master.mesos:2181/dcos-service-confluent-kafka", + "description": "Zookeeper connect string for the service cluster. Format is a comma-separated list of host:port entries, optionally followed by a chroot path (e.g. master.mesos:2181/dcos-service-confluent-kafka)", + "type": "string" + }, + "schema-registry-service": { + "default": "schema-registry", + "description": "Schema Registry service to be used by connect workers. The named VIP associated with this service will be used to set the key and value converter schema registry URLs.", + + "type": "string" + } + }, + "required": ["cpus", "mem", "instances", "name"], + "type": "object" + } + }, + "type": "object" +} diff --git a/repo/packages/C/confluent-connect/7/marathon.json.mustache b/repo/packages/C/confluent-connect/7/marathon.json.mustache new file mode 100644 index 000000000..c2ce04fd2 --- /dev/null +++ b/repo/packages/C/confluent-connect/7/marathon.json.mustache @@ -0,0 +1,70 @@ +{ + "id": "/{{connect.name}}", + "instances": {{connect.instances}}, + "cpus": {{connect.cpus}}, + "mem": {{connect.mem}}, + "maintainer": "partner-support@confluent.io", + "container": { + "type": "DOCKER", + "docker": { + "image": "{{resource.assets.container.docker.image}}", + "forcePullImage": true, + "network": "BRIDGE", + "portMappings": [ { + "containerPort": 8083, + "hostPort": 0, + "protocol": "tcp", + "labels": { + "VIP_0": "{{connect.name}}:8083" + } + } ] + } + }, + "portDefinitions": [ { + "name": "{{connect.name}}", + "port": 8083, + "protocol": "tcp", + "labels": { + "VIP_0": "{{connect.name}}:8083" + } + } ], + "env": { + "CONNECT_BOOTSTRAP_SERVERS": "broker.{{connect.kafka-service}}.l4lb.thisdcos.directory:9092", + "CONNECT_REST_PORT": "8083", + "CONNECT_GROUP_ID": "dcos-{{connect.name}}-group", + "CONNECT_CONFIG_STORAGE_TOPIC": "dcos-{{connect.name}}-configs", + "CONNECT_OFFSET_STORAGE_TOPIC": "dcos-{{connect.name}}-offsets", + "CONNECT_STATUS_STORAGE_TOPIC": "dcos-{{connect.name}}-status", + "CONNECT_KEY_CONVERTER" : "io.confluent.connect.avro.AvroConverter", + "CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL": "http://{{connect.schema-registry-service}}.marathon.l4lb.thisdcos.directory:8081", + "CONNECT_VALUE_CONVERTER" : 
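+ {{! This Mustache comment renders to nothing in the generated JSON. The Avro converters configured here depend on the *_SCHEMA_REGISTRY_URL variables in this env block, which are derived from connect.schema-registry-service in config.json. }}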
"io.confluent.connect.avro.AvroConverter", + "CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL": "http://{{connect.schema-registry-service}}.marathon.l4lb.thisdcos.directory:8081", + "CONNECT_INTERNAL_KEY_CONVERTER" : "org.apache.kafka.connect.json.JsonConverter", + "CONNECT_INTERNAL_VALUE_CONVERTER" : "org.apache.kafka.connect.json.JsonConverter", + "CONNECT_PRODUCER_INTERCEPTOR_CLASSES" : "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor", + "CONNECT_CONSUMER_INTERCEPTOR_CLASSES" : "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor", + "CONNECT_ZOOKEEPER_CONNECT": "{{connect.zookeeper-connect}}", + "KAFKA_HEAP_OPTS": "-Xmx{{connect.heap}}M" + }, + "healthChecks": [ + { + "protocol": "HTTP", + "portIndex": 0, + "path": "/", + "gracePeriodSeconds": 60, + "intervalSeconds": 60, + "timeoutSeconds": 20, + "maxConsecutiveFailures": 3, + "ignoreHttp1xx": false + } + ], + "acceptedResourceRoles": [ + "{{connect.role}}" + ], + "labels": { + "DCOS_SERVICE_NAME": "{{connect.name}}", + "DCOS_SERVICE_SCHEME": "http", + "DCOS_SERVICE_PORT_INDEX": "0" + } +} diff --git a/repo/packages/C/confluent-connect/7/package.json b/repo/packages/C/confluent-connect/7/package.json new file mode 100644 index 000000000..d1d9d7e05 --- /dev/null +++ b/repo/packages/C/confluent-connect/7/package.json @@ -0,0 +1,19 @@ +{ + "packagingVersion": "3.0", + "name": "confluent-connect", + "version": "1.0.0-3.2.2", + "scm": "https://github.com/confluentinc/kafka", + "description": "Confluent Connect worker\n\n\tDocumentation: http://docs.confluent.io/3.2.2/connect/managing.html", + "maintainer": "partner-support@confluent.io", + "tags": ["kafka", "confluent", "connect"], + "preInstallNotes": "This DC/OS Service is currently in preview. Preparing to install confluent-connect", + "postInstallNotes": "confluent-connect has been installed.", + "postUninstallNotes": "confluent-connect was uninstalled successfully.", + "minDcosReleaseVersion" : "1.8", + "licenses": [ + { + "name": "Apache License v2", + "url": "https://www.apache.org/licenses/LICENSE-2.0" + } + ] +} diff --git a/repo/packages/C/confluent-connect/7/resource.json b/repo/packages/C/confluent-connect/7/resource.json new file mode 100644 index 000000000..dbfcf3183 --- /dev/null +++ b/repo/packages/C/confluent-connect/7/resource.json @@ -0,0 +1,14 @@ +{ + "assets": { + "container": { + "docker": { + "image": "confluentinc/cp-kafka-connect:3.2.2" + } + } + }, + "images": { + "icon-small": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_small.png", + "icon-medium": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_medium.png", + "icon-large": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_large.png" + } +} diff --git a/repo/packages/C/confluent-control-center/8/config.json b/repo/packages/C/confluent-control-center/8/config.json new file mode 100644 index 000000000..441738900 --- /dev/null +++ b/repo/packages/C/confluent-control-center/8/config.json @@ -0,0 +1,80 @@ +{ + "$schema": "http://json-schema.org/schema#", + "properties": { + "control-center": { + "properties": { + "name": { + "default": "control-center", + "description": "Name for this control-center application", + "type": "string" + }, + "instances": { + "default": 1, + "description": "Number of instances to run.", + "minimum": 1, + "type": "integer" + }, + "cpus": { + "default": 2, + "description": "CPU shares to allocate to each control-center instance.", + "minimum": 2, + "type": "number" + }, + "mem": { + 
"default": 4096, + "description": "Memory (MB) to allocate to each control-center task.", + "minimum": 4096, + "type": "number" + }, + "role": { + "default": "*", + "description": "Deploy control-center only on nodes with this role.", + "type": "string" + }, + "kafka-service": { + "default": "confluent-kafka", + "description": "Target Apache Kafka by Confluent service to which these tasks will connect. ", + "type": "string" + }, + "connect-service": { + "default": "connect", + "description": "Service name of Kafka Connect Workers to which this instance will deploy connectors.", + "type": "string" + }, + "confluent-controlcenter-internal-topics-partitions": { + "default": 3, + "description": "Parition count for internal control-center kafka topics", + "type": "number" + }, + "confluent-controlcenter-internal-topics-replication": { + "default": 2, + "description": "Replication factor for internal control-center kafka topics", + "type": "number" + }, + "confluent-monitoring-interceptor-topic-partitions": { + "default": 3, + "description": "Parition count for kafka topics used to store data from the interceptor classes", + "type": "number" + }, + "confluent-monitoring-interceptor-topic-replication": { + "default": 2, + "description": "Replication factor for kafka topics used to store data from the interceptor classes", + "type": "number" + }, + "confluent-license": { + "default": "", + "description": "License key for Confluent Enterprise (default is 30-day trial)", + "type": "string" + }, + "zookeeper-connect": { + "default": "master.mesos:2181/dcos-service-confluent-kafka", + "description": "Zookeeper Connect string for service cluster. Format is comma-separated list of :/", + "type": "string" + } + }, + "required": ["cpus", "mem", "instances", "name"], + "type": "object" + } + }, + "type": "object" +} diff --git a/repo/packages/C/confluent-control-center/8/marathon.json.mustache b/repo/packages/C/confluent-control-center/8/marathon.json.mustache new file mode 100644 index 000000000..afaeef9c0 --- /dev/null +++ b/repo/packages/C/confluent-control-center/8/marathon.json.mustache @@ -0,0 +1,49 @@ +{ + "id": "/{{control-center.name}}", + "instances": {{control-center.instances}}, + "cpus": {{control-center.cpus}}, + "mem": {{control-center.mem}}, + "maintainer": "partner-support@confluent.io", + "container": { + "type": "DOCKER", + "docker": { + "image": "{{resource.assets.container.docker.image}}", + "forcePullImage": true, + "network": "BRIDGE", + "portMappings": [ { + "containerPort": 9021, + "hostPort": 0, + "protocol": "tcp" + } ] + } + }, + "env": { + "CONTROL_CENTER_BOOTSTRAP_SERVERS": "broker.{{control-center.kafka-service}}.l4lb.thisdcos.directory:9092", + "CONTROL_CENTER_CONNECT_CLUSTER": "{{control-center.connect-service}}.marathon.l4lb.thisdcos.directory:8083", + "CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS": "{{control-center.confluent-controlcenter-internal-topics-partitions}}", + "CONTROL-CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS": "{{control-center.confluent-monitoring-interceptor-topic-partitions}}", + "CONTROL_CENTER_REPLICATION_FACTOR": "{{control-center.confluent-controlcenter-internal-topics-replication}}", + "CONTROL_CENTER_LICENSE": "{{control-center.confluent-license}}", + "CONTROL_CENTER_ZOOKEEPER_CONNECT": "{{control-center.zookeeper-connect}}" + }, + "healthChecks": [ + { + "protocol": "HTTP", + "portIndex": 0, + "path": "/", + "gracePeriodSeconds": 3600, + "intervalSeconds": 60, + "timeoutSeconds": 5, + "maxConsecutiveFailures": 10, + "ignoreHttp1xx": false + } + 
], + "acceptedResourceRoles": [ + "{{control-center.role}}" + ], + "labels": { + "DCOS_SERVICE_NAME": "{{control-center.name}}", + "DCOS_SERVICE_SCHEME": "http", + "DCOS_SERVICE_PORT_INDEX": "0" + } +} diff --git a/repo/packages/C/confluent-control-center/8/package.json b/repo/packages/C/confluent-control-center/8/package.json new file mode 100644 index 000000000..f61276c92 --- /dev/null +++ b/repo/packages/C/confluent-control-center/8/package.json @@ -0,0 +1,19 @@ +{ + "packagingVersion": "3.0", + "name": "confluent-control-center", + "version": "1.0.0-3.2.2", + "scm": "https://github.com/confluentinc/control-center", + "description": "Confluent Control Center service\n\n\tDocumentation: http://docs.confluent.io/3.2.2/control-center/docs/userguide.html\n\tDC/OS Specifics: https://www.confluent.io/whitepaper/deploying-confluent-platform-with-mesosphere", + "maintainer": "partner-support@confluent.io", + "tags": ["kafka", "confluent", "control", "center"], + "preInstallNotes": "Preparing to install confluent-control-center", + "postInstallNotes": "confluent-control-center has been installed.", + "postUninstallNotes": "confluent-control-center was uninstalled successfully.", + "minDcosReleaseVersion" : "1.8", + "licenses": [ + { + "name": "Apache License v2", + "url": "https://raw.githubusercontent.com/confluentinc/control-center/master/LICENSE" + } + ] +} diff --git a/repo/packages/C/confluent-control-center/8/resource.json b/repo/packages/C/confluent-control-center/8/resource.json new file mode 100644 index 000000000..5131f1fee --- /dev/null +++ b/repo/packages/C/confluent-control-center/8/resource.json @@ -0,0 +1,14 @@ +{ + "assets": { + "container": { + "docker": { + "image": "confluentinc/cp-enterprise-control-center:3.2.2" + } + } + }, + "images": { + "icon-small": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_small.png", + "icon-medium": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_medium.png", + "icon-large": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_large.png" + } +} diff --git a/repo/packages/C/confluent-kafka/18/command.json b/repo/packages/C/confluent-kafka/18/command.json new file mode 100644 index 000000000..1379fc483 --- /dev/null +++ b/repo/packages/C/confluent-kafka/18/command.json @@ -0,0 +1,5 @@ +{ + "pip": [ + "https://downloads.mesosphere.com/kafka/assets/1.1.19-0.10.1.0/bin_wrapper-0.0.1-py2.py3-none-any.whl" + ] +} diff --git a/repo/packages/C/confluent-kafka/18/config.json b/repo/packages/C/confluent-kafka/18/config.json new file mode 100644 index 000000000..cc67daac6 --- /dev/null +++ b/repo/packages/C/confluent-kafka/18/config.json @@ -0,0 +1,789 @@ +{ + "type":"object", + "properties":{ + "service":{ + "type":"object", + "description": "DC/OS service configuration properties", + "properties":{ + "name" : { + "description":"The name of the Apache Kafka by Confluent instance", + "type":"string", + "default":"confluent-kafka" + }, + "user": { + "type": "string", + "description": "The user that the service will run as.", + "default": "root" + }, + "principal": { + "description": "The principal for the Kafka service instance.", + "type": "string", + "default": "confluent-kafka-principal" + }, + "secret_name": { + "description": "Name of the Secret Store credentials to use for DC/OS service authentication. 
This should be left empty unless service authentication is needed.", + "type": "string", + "default": "" + }, + "placement_constraint":{ + "description":"Marathon-style placement constraint for Broker nodes. Example: rack_id:LIKE:rack-foo-.*,rack_id:MAX_PER:2", + "type":"string", + "default":"" + }, + "placement_strategy":{ + "description":"Broker placement strategy. See documentation. [ANY, NODE]", + "type":"string", + "default":"NODE" + }, + "phase_strategy":{ + "description":"Broker rollout strategy. See documentation. [INSTALL, STAGE]", + "type":"string", + "default":"INSTALL" + }, + "enable_replacement":{ + "description":"Enable automated replacement of Brokers. WARNING: May cause data loss. See documentation.", + "type":"boolean", + "default":false + }, + "recover_in_place_grace_period_secs":{ + "description":"The minimum amount of time (in seconds) which must pass before a Broker may be destructively replaced.", + "type":"number", + "default":1200 + }, + "min_delay_between_recovers_secs":{ + "description":"The minimum amount of time (in seconds) which must pass between destructive replacements of Brokers.", + "type":"number", + "default":600 + }, + "enable_health_check":{ + "description":"Enable automated detection of Broker failures which did not result in a Broker process exit.", + "type":"boolean", + "default":false + }, + "health_check_delay_sec":{ + "description":"The period of time (in seconds) to wait before the health-check begins execution.", + "type":"number", + "default":30 + }, + "health_check_interval_sec":{ + "description":"The period of time (in seconds) between health-check executions.", + "type":"number", + "default":30 + }, + "health_check_timeout_sec":{ + "description":"The duration (in seconds) allowed for a health-check to complete before it is considered a failure.", + "type":"number", + "default":10 + }, + "health_check_grace_period_sec":{ + "description":"The period of time after the delay (in seconds) before health-check failures count towards the maximum consecutive failures.", + "type":"number", + "default":120 + }, + "health_check_max_consecutive_failures":{ + "description":"The number of consecutive failures which cause a Broker process to exit.", + "type":"number", + "default":2 + } + }, + "required":[ + "placement_strategy", + "phase_strategy" + ] + }, + "brokers":{ + "description":"Kafka broker configuration properties", + "type":"object", + "properties":{ + "cpus":{ + "description":"Broker cpu requirements", + "type":"number", + "default":1.0 + }, + "mem":{ + "description":"Broker mem requirements. Amount should be at least 512 MB more than the JVM heap allocation for the broker process.", + "type":"integer", + "minimum":1536, + "default":3072 + }, + "heap":{ + "description":"The Kafka process JVM heap configuration object", + "type":"object", + "properties":{ + "size": { + "type":"integer", + "description":"The amount of memory, in MB, allocated to the Kafka broker JVM heap. Amount should NOT exceed {broker_container_mem - 512}.", + "minimum":1024, + "default": 2048 + } + }, + "additionalProperties": false, + "required": [ + "size" + ] + }, + "disk":{ + "description":"Persistent disk capacity, in MB, for kafka log storage on the broker", + "type":"integer", + "default":5000 + }, + "disk_type": { + "type": "string", + "description": "Disk type to be used for persistent disk capacity. 
Supported options are [ROOT, MOUNT] (see documentation)", + "default": "ROOT" + }, + "count":{ + "description":"Number of brokers to run", + "type":"number", + "default":3 + }, + "port": { + "description": "Port for broker to listen on", + "type": "integer", + "default": 0 + }, + "jmx": { + "description":"JMX options", + "type":"object", + "properties": { + "enable": { + "description": "Whether to enable JMX options", + "type": "boolean", + "default": false + }, + "remote": { + "description": "Whether to enable remote monitoring", + "type": "boolean", + "default": false + }, + "remote_port": { + "description": "Port number for remote JMX access", + "type": "integer", + "default": 9999 + }, + "remote_registry_ssl": { + "description": "Whether to protect the remote RMI registry using SSL", + "type": "boolean", + "default": false + }, + "remote_ssl": { + "description": "Whether to enable SSL for remote access", + "type": "boolean", + "default": false + }, + "remote_authenticate": { + "description": "Whether to enable password authentication for remote access", + "type": "boolean", + "default": false + }, + "remote_ssl_need_client_auth": { + "description": "Whether to enable SSL client authentication for remote access", + "type": "boolean", + "default": false + } + } + }, + "statsd": { + "description":"Statsd configuration", + "type":"object", + "properties": { + "host": { + "description": "Statsd UDP output host", + "type": "string", + "default": "" + }, + "port": { + "description": "Statsd UDP output port", + "type": "integer", + "default": 0 + } + } + } + }, + "required":[ + "cpus", + "mem", + "disk", + "count" + ] + }, + "executor":{ + "description":"Kafka executor configuration properties", + "type":"object", + "properties":{ + "cpus":{ + "description":"Executor cpu requirements", + "type":"number", + "default":0.5 + }, + "mem":{ + "description":"Executor mem requirements", + "type":"integer", + "default":256 + }, + "disk":{ + "description":"Executor disk requirements", + "type":"integer", + "default":0 + } + }, + "required":[ + "cpus", + "mem", + "disk" + ] + }, + "kafka":{ + "description":"Kafka service configuration properties", + "type":"object", + "additionalProperties":false, + "properties":{ + "kafka_zookeeper_uri":{ + "title":"The address of the Zookeeper cluster used by Kafka.", + "description":"This should be the address of the Zookeeper cluster Kafka will use.", + "type":"string", + "default":"master.mesos:2181" + }, + "kafka_advertise_host_ip":{ + "description":"Automatically configure advertised.host.name with the IP address detected by /opt/mesosphere/detect_ip", + "type":"boolean", + "default":true + }, + "auto_create_topics_enable":{ + "title":"auto.create.topics.enable", + "description":"Enables auto creation of topics on the server", + "type":"boolean", + "default":true + }, + "auto_leader_rebalance_enable":{ + "title":"auto.leader.rebalance.enable", + "description":"Enables auto leader balancing. A background thread checks and triggers leader balance if required at regular intervals", + "type":"boolean", + "default":true + }, + "background_threads":{ + "title":"background.threads", + "description":"The number of threads to use for various background processing tasks", + "type":"integer", + "default":10 + }, + "compression_type":{ + "title":"compression.type", + "description":"Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4'). 
It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.", + "type":"string", + "default":"producer" + }, + "delete_topic_enable":{ + "title":"delete.topic.enable", + "description":"Enables topic deletion. Deleting a topic through the admin tool will have no effect if this config is turned off", + "type":"boolean", + "default":false + }, + "leader_imbalance_check_interval_seconds":{ + "title":"leader.imbalance.check.interval.seconds", + "description":"The frequency with which the partition rebalance check is triggered by the controller", + "type":"integer", + "default":300 + }, + "leader_imbalance_per_broker_percentage":{ + "title":"leader.imbalance.per.broker.percentage", + "description":"The ratio of leader imbalance allowed per broker. The controller would trigger a leader balance if it goes above this value per broker. The value is specified in percentage.", + "type":"integer", + "default":10 + }, + "confluent_support_metrics_enable":{ + "title":"confluent.support.metrics.enable", + "description":"Enable the Metrics feature to collect and report support metrics", + "type":"boolean", + "default":false + }, + "confluent_support_customer_id":{ + "title":"confluent.support.customer.id", + "description":"The customer ID under which support metrics will be collected and reported. When set to 'anonymous', only a reduced set of metrics is collected and reported.", + "type":"string", + "default":"anonymous" + }, + "confluent_metric_reporters":{ + "title":"metric.reporters", + "description":"Enterprise Java class used to collect and report broker metrics", + "type":"string", + "default":"io.confluent.metrics.reporter.ConfluentMetricsReporter" + }, + "confluent_metrics_reporter_topic":{ + "title":"confluent.metrics.reporter.topic", + "description":"Internal metrics topic (DO NOT CHANGE)", + "type":"string", + "default":"_confluent-metrics" + }, + "confluent_metrics_reporter_topic_replicas":{ + "title":"confluent.metrics.reporter.topic.replicas", + "description":"Replication factor for the internal metrics topic", + "type":"integer", + "default":3 + }, + "log_flush_interval_messages":{ + "title":"log.flush.interval.messages", + "description":"The number of messages accumulated on a log partition before messages are flushed to disk", + "type":"string", + "default":"9223372036854775807" + }, + "log_flush_offset_checkpoint_interval_ms":{ + "title":"log.flush.offset.checkpoint.interval.ms", + "description":"The frequency with which we update the persistent record of the last flush which acts as the log recovery point", + "type":"integer", + "default":60000 + }, + "log_flush_scheduler_interval_ms":{ + "title":"log.flush.scheduler.interval.ms", + "description":"The frequency in ms that the log flusher checks whether any log needs to be flushed to disk", + "type":"string", + "default":"9223372036854775807" + }, + "log_retention_bytes":{ + "title":"log.retention.bytes", + "description":"The maximum size of the log before deleting it", + "type":"string", + "default":"-1" + }, + "log_retention_hours":{ + "title":"log.retention.hours", + "description":"The number of hours to keep a log file before deleting it (in hours), tertiary to log.retention.ms property", + "type":"integer", + "default":168 + }, + "log_roll_hours":{ + "title":"log.roll.hours", + "description":"The maximum time before a new log segment is rolled out (in hours), secondary to log.roll.ms property", + "type":"integer", + "default":168 + }, + 
"log_roll_jitter_hours":{ + "title":"log.roll.jitter.hours", + "description":"The maximum jitter to subtract from logRollTimeMillis (in hours), secondary to log.roll.jitter.ms property", + "type":"integer", + "default":0 + }, + "log_segment_bytes":{ + "title":"log.segment.bytes", + "description":"The maximum size of a single log file", + "type":"integer", + "default":1073741824 + }, + "log_segment_delete_delay_ms":{ + "title":"log.segment.delete.delay.ms", + "description":"The amount of time to wait before deleting a file from the filesystem", + "type":"integer", + "default":60000 + }, + "message_max_bytes":{ + "title":"message.max.bytes", + "description":"The maximum size of message that the server can receive", + "type":"integer", + "default":1000012 + }, + "min_insync_replicas":{ + "title":"min.insync.replicas", + "description":"define the minimum number of replicas in ISR needed to satisfy a produce request with required.acks=-1 (or all)", + "type":"integer", + "default":1 + }, + "num_io_threads":{ + "title":"num.io.thread", + "description":"The number of io threads that the server uses for carrying out network requests", + "type":"integer", + "default":8 + }, + "num_network_threads":{ + "title":"num.network.threads", + "description":"The number of network threads that the server uses for handling network requests", + "type":"integer", + "default":3 + }, + "num_recovery_threads_per_data_dir":{ + "title":"num.recovery.threads.per.data.dir", + "description":"The number of threads per data directory to be used for log recovery at startup and flushing at shutdown", + "type":"integer", + "default":1 + }, + "num_replica_fetchers":{ + "title":"num.replica.fetchers", + "description":"Number of fetcher threads used to replicate messages from a source broker. Increasing this value can increase the degree of I/O parallelism in the follower broker.", + "type":"integer", + "default":1 + }, + "offset_metadata_max_bytes":{ + "title":"offset.metadata.max.bytes", + "description":"The maximum size for a metadata entry associated with an offset commit", + "type":"integer", + "default":4096 + }, + "offsets_commit_required_acks":{ + "title":"offsets.commit.required.acks", + "description":"The required acks before the commit can be accepted. In general, the default (-1) should not be overridden", + "type":"integer", + "default":-1 + }, + "offsets_commit_timeout_ms":{ + "title":"offsets.commit.timeout.ms", + "description":"Offset commit will be delayed until all replicas for the offsets topic receive the commit or this timeout is reached. 
This is similar to the producer request timeout.", + "type":"integer", + "default":5000 + }, + "offsets_load_buffer_size":{ + "title":"offsets.load.buffer.size", + "description":"Batch size for reading from the offsets segments when loading offsets into the cache.", + "type":"integer", + "default":5242880 + }, + "offsets_retention_check_interval_ms":{ + "title":"offsets.retention.check.interval.ms", + "description":"Frequency at which to check for stale offsets", + "type":"integer", + "default":600000 + }, + "offsets_retention_minutes":{ + "title":"offsets.retention.minutes", + "description":"Log retention window in minutes for offsets topic", + "type":"integer", + "default":1440 + }, + "offsets_topic_compression_codec":{ + "title":"offsets.topic.compression.codec", + "description":"Compression codec for the offsets topic - compression may be used to achieve 'atomic' commits", + "type":"integer", + "default":0 + }, + "offsets_topic_num_partitions":{ + "title":"offsets.topic.num.partitions", + "description":"The number of partitions for the offset commit topic (should not change after deployment).", + "type":"integer", + "default":50 + }, + "offsets_topic_replication_factor":{ + "title":"offsets.topic.replication.factor", + "description":"The replication factor for the offsets topic (set higher to ensure availability). To ensure that the effective replication factor of the offsets topic is the configured value, the number of alive brokers has to be at least the replication factor at the time of the first request for the offsets topic. If not, either the offsets topic creation will fail or it will get a replication factor of min(alive brokers, configured replication factor)", + "type":"integer", + "default":3 + }, + "offsets_topic_segment_bytes":{ + "title":"offsets.topic.segment.bytes", + "description":"The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads", + "type":"integer", + "default":104857600 + }, + "queued_max_requests":{ + "title":"queued.max.requests", + "description":"The number of queued requests allowed before blocking the network threads", + "type":"integer", + "default":500 + }, + "quota_consumer_default":{ + "title":"quota.consumer.default", + "description":"Any consumer distinguished by clientId/consumer group will get throttled if it fetches more bytes than this value per-second", + "type":"string", + "default":"9223372036854775807" + }, + "quota_producer_default":{ + "title":"quota.producer.default", + "description":"Any producer distinguished by clientId will get throttled if it produces more bytes than this value per-second", + "type":"string", + "default":"9223372036854775807" + }, + "replica_fetch_max_bytes":{ + "title":"replica.fetch.max.bytes", + "description":"The number of bytes of messages to attempt to fetch", + "type":"integer", + "default":1048576 + }, + "replica_fetch_min_bytes":{ + "title":"replica.fetch.min.bytes", + "description":"Minimum bytes expected for each fetch response. If not enough bytes, wait up to replicaMaxWaitTimeMs", + "type":"integer", + "default":1 + }, + "replica_fetch_wait_max_ms":{ + "title":"replica.fetch.wait.max.ms", + "description":"Max wait time for each fetcher request issued by follower replicas. 
This value should always be less than the replica.lag.time.max.ms at all times to prevent frequent shrinking of ISR for low throughput topics", + "type":"integer", + "default":500 + }, + "replica_high_watermark_checkpoint_interval_ms":{ + "title":"replica.high.watermark.checkpoint.interval.ms", + "description":"The frequency with which the high watermark is saved out to disk", + "type":"integer", + "default":5000 + }, + "replica_lag_time_max_ms":{ + "title":"replica.lag.time.max.ms", + "description":"If a follower hasn't sent any fetch requests or hasn't consumed up to the leader's log end offset for at least this time, the leader will remove the follower from the ISR", + "type":"integer", + "default":10000 + }, + "replica_socket_receive_buffer_bytes":{ + "title":"replica.socket.receive.buffer.bytes", + "description":"The socket receive buffer for network requests", + "type":"integer", + "default":65536 + }, + "replica_socket_timeout_ms":{ + "title":"replica.socket.timeout.ms", + "description":"The socket timeout for network requests. Its value should be at least replica.fetch.wait.max.ms", + "type":"integer", + "default":30000 + }, + "request_timeout_ms":{ + "title":"request.timeout.ms", + "description":"The configuration controls the maximum amount of time the client will wait for the response of a request. If the response is not received before the timeout elapses the client will resend the request if necessary or fail the request if retries are exhausted.", + "type":"integer", + "default":30000 + }, + "socket_receive_buffer_bytes":{ + "title":"socket.receive.buffer.bytes", + "description":"The SO_RCVBUF buffer of the socket server sockets", + "type":"integer", + "default":102400 + }, + "socket_request_max_bytes":{ + "title":"socket.request.max.bytes", + "description":"The maximum number of bytes in a socket request", + "type":"integer", + "default":104857600 + }, + "socket_send_buffer_bytes":{ + "title":"socket.send.buffer.bytes", + "description":"The SO_SNDBUF buffer of the socket server sockets", + "type":"integer", + "default":102400 + }, + "unclean_leader_election_enable":{ + "title":"unclean.leader.election.enable", + "description":"Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss", + "type":"boolean", + "default":true + }, + "zookeeper_session_timeout_ms":{ + "title":"zookeeper.session.timeout.ms", + "description":"Zookeeper session timeout", + "type":"integer", + "default":6000 + }, + "connections_max_idle_ms":{ + "title":"connections.max.idle.ms", + "description":"Idle connections timeout: the server socket processor threads close the connections that idle more than this", + "type":"integer", + "default":600000 + }, + "controlled_shutdown_enable":{ + "title":"controlled.shutdown.enable", + "description":"Enable controlled shutdown of the server", + "type":"boolean", + "default":true + }, + "controlled_shutdown_max_retries":{ + "title":"controlled.shutdown.max.retries", + "description":"Controlled shutdown can fail for multiple reasons. This determines the number of retries when such failure happens", + "type":"integer", + "default":3 + }, + "controlled_shutdown_retry_backoff_ms":{ + "title":"controlled.shutdown.retry.backoff.ms", + "description":"Before each retry, the system needs time to recover from the state that caused the previous failure (controller failover, replica lag, etc.). 
This config determines the amount of time to wait before retrying.", + "type":"integer", + "default":5000 + }, + "controller_socket_timeout_ms":{ + "title":"controller.socket.timeout.ms", + "description":"The socket timeout for controller-to-broker channels", + "type":"integer", + "default":30000 + }, + "default_replication_factor":{ + "title":"default.replication.factor", + "description":"Default replication factor for automatically created topics", + "type":"integer", + "default":1 + }, + "fetch_purgatory_purge_interval_requests":{ + "title":"fetch.purgatory.purge.interval.requests", + "description":"The purge interval (in number of requests) of the fetch request purgatory", + "type":"integer", + "default":1000 + }, + "group_max_session_timeout_ms":{ + "title":"group.max.session.timeout.ms", + "description":"The maximum allowed session timeout for registered consumers", + "type":"integer", + "default":300000 + }, + "group_min_session_timeout_ms":{ + "title":"group.min.session.timeout.ms", + "description":"The minimum allowed session timeout for registered consumers", + "type":"integer", + "default":6000 + }, + "inter_broker_protocol_version":{ + "type":"string", + "title":"inter.broker.protocol.version", + "description":"Specify which version of the inter-broker protocol will be used, which must align with log.message.format.version. This is typically bumped after all brokers were upgraded to a new version. Examples of valid values are: 0.8.0, 0.8.1, 0.8.1.1, 0.8.2, 0.8.2.0, 0.8.2.1, 0.9.0.0, 0.9.0.1, 0.10.0.0. Check ApiVersion for the full list.", + "default":"0.10.0.0" + }, + "log_message_format_version":{ + "type":"string", + "title":"log.message.format.version", + "description":"Specify which version of the log message format will be used, which must align with inter.broker.protocol.version. This is a new setting as of 0.10.0.0, and should be left at 0.9.0 until clients are updated to 0.10.0.x. Clients on earlier versions may see a performance penalty if this is increased before they've upgraded. See the latest Kafka documentation for details.", + "default":"0.10.0" + }, + "log_cleaner_backoff_ms":{ + "title":"log.cleaner.backoff.ms", + "description":"The amount of time to sleep when there are no logs to clean", + "type":"integer", + "default":15000 + }, + "log_cleaner_dedupe_buffer_size":{ + "title":"log.cleaner.dedupe.buffer.size", + "description":"The total memory used for log deduplication across all cleaner threads", + "type":"integer", + "default":134217728 + }, + "log_cleaner_delete_retention_ms":{ + "title":"log.cleaner.delete.retention.ms", + "description":"How long delete records are retained", + "type":"integer", + "default":86400000 + }, + "log_cleaner_enable":{ + "title":"log.cleaner.enable", + "description":"Enable the log cleaner process to run on the server. Should be enabled when using any topics with cleanup.policy=compact, including the internal offsets topic. If disabled, those topics will not be compacted and will continually grow in size.", + "type":"boolean", + "default":true + }, + "log_cleaner_io_buffer_load_factor":{ + "title":"log.cleaner.io.buffer.load.factor", + "description":"Log cleaner dedupe buffer load factor. The percentage full the dedupe buffer can become. 
A higher value will allow more log to be cleaned at once but will lead to more hash collisions", + "type":"number", + "default":0.9 + }, + "log_cleaner_io_buffer_size":{ + "title":"log.cleaner.io.buffer.size", + "description":"The total memory used for log cleaner I/O buffers across all cleaner threads", + "type":"integer", + "default":524288 + }, + "log_cleaner_io_max_bytes_per_second":{ + "title":"log.cleaner.io.max.bytes.per.second", + "description":"The log cleaner will be throttled so that the sum of its read and write I/O will be less than this value on average", + "type":"number", + "default":1.7976931348623157E308 + }, + "log_cleaner_min_cleanable_ratio":{ + "title":"log.cleaner.min.cleanable.ratio", + "description":"The minimum ratio of dirty log to total log for a log to be eligible for cleaning", + "type":"number", + "default":0.5 + }, + "log_cleaner_threads":{ + "title":"log.cleaner.threads", + "description":"The number of background threads to use for log cleaning", + "type":"integer", + "default":1 + }, + "log_cleanup_policy":{ + "type":"string", + "title":"log.cleanup.policy", + "description":"The default cleanup policy for segments beyond the retention window, must be either 'delete' or 'compact'", + "default":"delete" + }, + "log_index_interval_bytes":{ + "title":"log.index.interval.bytes", + "description":"The interval with which we add an entry to the offset index", + "type":"integer", + "default":4096 + }, + "log_index_size_max_bytes":{ + "title":"log.index.size.max.bytes", + "description":"The maximum size in bytes of the offset index", + "type":"integer", + "default":10485760 + }, + "log_preallocate":{ + "title":"log.preallocate", + "description":"Should we preallocate the file when creating a new segment? If you are using Kafka on Windows, you probably need to set this to true.", + "type":"boolean", + "default":false + }, + "log_retention_check_interval_ms":{ + "title":"log.retention.check.interval.ms", + "description":"The frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion", + "type":"integer", + "default":300000 + }, + "max_connections_per_ip":{ + "title":"max.connections.per.ip", + "description":"The maximum number of connections allowed from each IP address", + "type":"integer", + "default":2147483647 + }, + "max_connections_per_ip_overrides":{ + "type":"string", + "title":"max.connections.per.ip.overrides", + "description":"Per-IP or hostname overrides to the default maximum number of connections", + "default":"" + }, + "num_partitions":{ + "title":"num.partitions", + "description":"The default number of log partitions per topic", + "type":"integer", + "default":1 + }, + "producer_purgatory_purge_interval_requests":{ + "title":"producer.purgatory.purge.interval.requests", + "description":"The purge interval (in number of requests) of the producer request purgatory", + "type":"integer", + "default":1000 + }, + "replica_fetch_backoff_ms":{ + "title":"replica.fetch.backoff.ms", + "description":"The amount of time to sleep when a fetch partition error occurs.", + "type":"integer", + "default":1000 + }, + "reserved_broker_max_id":{ + "title":"reserved.broker.max.id", + "description":"Max number that can be used for a broker.id", + "type":"integer", + "default":1000 + }, + "metrics_num_samples":{ + "title":"metrics.num.samples", + "description":"The number of samples maintained to compute metrics.", + "type":"integer", + "default":2 + }, + "metrics_sample_window_ms":{ + "title":"metrics.sample.window.ms", + "description":"The time window, in ms, 
over which metrics samples are computed.", + "type":"integer", + "default":30000 + }, + "quota_window_num":{ + "title":"quota.window.num", + "description":"The number of samples to retain in memory for client quotas", + "type":"integer", + "default":11 + }, + "quota_window_size_seconds":{ + "title":"quota.window.size.seconds", + "description":"The time span of each sample", + "type":"integer", + "default":1 + }, + "zookeeper_sync_time_ms":{ + "title":"zookeeper.sync.time.ms", + "description":"How far a ZK follower can be behind a ZK leader", + "type":"integer", + "default":2000 + } + } + } + } +} diff --git a/repo/packages/C/confluent-kafka/18/marathon.json.mustache b/repo/packages/C/confluent-kafka/18/marathon.json.mustache new file mode 100644 index 000000000..d254b7ede --- /dev/null +++ b/repo/packages/C/confluent-kafka/18/marathon.json.mustache @@ -0,0 +1,211 @@ +{ + "id": "{{service.name}}", + "cpus": 1.0, + "mem": 1230, + "instances": 1, + "cmd": "export LD_LIBRARY_PATH=$MESOS_SANDBOX/libmesos-bundle/lib:$LD_LIBRARY_PATH && export MESOS_NATIVE_JAVA_LIBRARY=$(ls $MESOS_SANDBOX/libmesos-bundle/lib/libmesos-*.so) && export PATH=$(ls -d $MESOS_SANDBOX/jre*/bin):$PATH && ./scheduler/bin/kafka-scheduler server ./scheduler/conf/scheduler.yml", + "labels": { + "DCOS_PACKAGE_FRAMEWORK_NAME": "{{service.name}}", + "DCOS_MIGRATION_API_VERSION": "v1", + "DCOS_MIGRATION_API_PATH": "/v1/plan", + "MARATHON_SINGLE_INSTANCE_APP":"true", + "DCOS_SERVICE_NAME": "{{service.name}}", + "DCOS_SERVICE_PORT_INDEX": "1", + "DCOS_SERVICE_SCHEME": "http" + }, + "env": { + "LD_LIBRARY_PATH": "/opt/mesosphere/lib", + "JAVA_HOME":"./jre1.8.0_121", + "FRAMEWORK_NAME": "{{service.name}}", + "FRAMEWORK_PRINCIPAL": "{{service.principal}}", + "USER": "{{service.user}}", + "PLACEMENT_CONSTRAINT": "{{service.placement_constraint}}", + "PLACEMENT_STRATEGY": "{{service.placement_strategy}}", + "PHASE_STRATEGY": "{{service.phase_strategy}}", + "ENABLE_REPLACEMENT": "{{service.enable_replacement}}", + "RECOVERY_GRACE_PERIOD_SEC": "{{service.recover_in_place_grace_period_secs}}", + "REPLACE_DELAY_SEC": "{{service.min_delay_between_recovers_secs}}", + "ENABLE_BROKER_HEALTH_CHECK": "{{service.enable_health_check}}", + "BROKER_HEALTH_CHECK_DELAY_SEC": "{{service.health_check_delay_sec}}", + "BROKER_HEALTH_CHECK_INTERVAL_SEC": "{{service.health_check_interval_sec}}", + "BROKER_HEALTH_CHECK_TIMEOUT_SEC": "{{service.health_check_timeout_sec}}", + "BROKER_HEALTH_CHECK_MAX_FAILURES": "{{service.health_check_max_consecutive_failures}}", + "BROKER_HEALTH_CHECK_GRACE_SEC": "{{service.health_check_grace_period_sec}}", + "BROKER_COUNT": "{{brokers.count}}", + "BROKER_CPUS": "{{brokers.cpus}}", + "BROKER_MEM": "{{brokers.mem}}", + "BROKER_HEAP_MB": "{{brokers.heap.size}}", + "BROKER_DISK": "{{brokers.disk}}", + "BROKER_PORT": "{{brokers.port}}", + "BROKER_JMX_ENABLE": "{{brokers.jmx.enable}}", + "BROKER_JMX_REMOTE_ENABLE": "{{brokers.jmx.remote}}", + "BROKER_JMX_REMOTE_PORT": "{{brokers.jmx.remote_port}}", + "BROKER_JMX_REMOTE_REGISTRY_SSL": "{{brokers.jmx.remote_registry_ssl}}", + "BROKER_JMX_REMOTE_SSL": "{{brokers.jmx.remote_ssl}}", + "BROKER_JMX_REMOTE_AUTH": "{{brokers.jmx.remote_authenticate}}", + "BROKER_JMX_REMOTE_SSL_NEED_CLIENT_AUTH": "{{brokers.jmx.remote_ssl_need_client_auth}}", + "BROKER_STATSD_HOST": "{{brokers.statsd.host}}", + "BROKER_STATSD_PORT": "{{brokers.statsd.port}}", + "DISK_TYPE": "{{brokers.disk_type}}", + "KAFKA_VER_NAME": "kafka_confluent-3.2.2", + "KAFKA_URI": "{{resource.assets.uris.kafka_tgz}}", + "OVERRIDER_URI": 
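+ {{! Mustache comment, removed at render time: these *_URI variables are presumably fetched by the scheduler when it launches broker tasks; they resolve to the artifact URLs listed in resource.json. }}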
"{{resource.assets.uris.overrider-zip}}", + "EXECUTOR_URI": "{{resource.assets.uris.executor-zip}}", + "JAVA_URI": "{{resource.assets.uris.jre-tar-gz}}", + "KAFKA_ZOOKEEPER_URI" : "{{kafka.kafka_zookeeper_uri}}", + "KAFKA_ADVERTISE_HOST_IP" : "{{kafka.kafka_advertise_host_ip}}", + "KAFKA_OVERRIDE_RESERVED_BROKER_MAX_ID": "{{kafka.reserved_broker_max_id}}", + "KAFKA_OVERRIDE_OFFSETS_TOPIC_COMPRESSION_CODEC": "{{kafka.offsets_topic_compression_codec}}", + "KAFKA_OVERRIDE_REPLICA_FETCH_MIN_BYTES": "{{kafka.replica_fetch_min_bytes}}", + "KAFKA_OVERRIDE_CONTROLLED_SHUTDOWN_RETRY_BACKOFF_MS": "{{kafka.controlled_shutdown_retry_backoff_ms}}", + "KAFKA_OVERRIDE_LOG_FLUSH_OFFSET_CHECKPOINT_INTERVAL_MS": "{{kafka.log_flush_offset_checkpoint_interval_ms}}", + "KAFKA_OVERRIDE_CONFLUENT_SUPPORT_METRICS_ENABLE": "{{kafka.confluent_support_metrics_enable}}", + "KAFKA_OVERRIDE_CONFLUENT_SUPPORT_CUSTOMER_ID": "{{kafka.confluent_support_customer_id}}", + "KAFKA_OVERRIDE_METRIC_REPORTERS": "{{kafka.confluent_metric_reporters}}", + "KAFKA_OVERRIDE_CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS": "broker.{{service.name}}.l4lb.thisdcos.directory:9092", + "KAFKA_OVERRIDE_CONFLUENT_METRICS_REPORTER_ZOOKEEPER_CONNECT": "master.mesos:2181/dcos-service-{{service.name}}", + "KAFKA_OVERRIDE_CONFLUENT_METRICS_REPORTER_TOPIC": "{{kafka.confluent_metrics_reporter_topic}}", + "KAFKA_OVERRIDE_CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS": "{{kafka.confluent_metrics_reporter_topic_replicas}}", + "KAFKA_OVERRIDE_OFFSETS_TOPIC_NUM_PARTITIONS": "{{kafka.offsets_topic_num_partitions}}", + "KAFKA_OVERRIDE_MAX_CONNECTIONS_PER_IP_OVERRIDES": "{{kafka.max_connections_per_ip_overrides}}", + "KAFKA_OVERRIDE_LEADER_IMBALANCE_CHECK_INTERVAL_SECONDS": "{{kafka.leader_imbalance_check_interval_seconds}}", + "KAFKA_OVERRIDE_INTER_BROKER_PROTOCOL_VERSION": "{{kafka.inter_broker_protocol_version}}", + "KAFKA_OVERRIDE_LOG_MESSAGE_FORMAT_VERSION": "{{kafka.log_message_format_version}}", + "KAFKA_OVERRIDE_REPLICA_SOCKET_TIMEOUT_MS": "{{kafka.replica_socket_timeout_ms}}", + "KAFKA_OVERRIDE_GROUP_MAX_SESSION_TIMEOUT_MS": "{{kafka.group_max_session_timeout_ms}}", + "KAFKA_OVERRIDE_METRICS_NUM_SAMPLES": "{{kafka.metrics_num_samples}}", + "KAFKA_OVERRIDE_LOG_CLEANER_DELETE_RETENTION_MS": "{{kafka.log_cleaner_delete_retention_ms}}", + "KAFKA_OVERRIDE_LOG_PREALLOCATE": "{{kafka.log_preallocate}}", + "KAFKA_OVERRIDE_REPLICA_SOCKET_RECEIVE_BUFFER_BYTES": "{{kafka.replica_socket_receive_buffer_bytes}}", + "KAFKA_OVERRIDE_OFFSET_METADATA_MAX_BYTES": "{{kafka.offset_metadata_max_bytes}}", + "KAFKA_OVERRIDE_MESSAGE_MAX_BYTES": "{{kafka.message_max_bytes}}", + "KAFKA_OVERRIDE_LOG_ROLL_JITTER_HOURS": "{{kafka.log_roll_jitter_hours}}", + "KAFKA_OVERRIDE_OFFSETS_RETENTION_CHECK_INTERVAL_MS": "{{kafka.offsets_retention_check_interval_ms}}", + "KAFKA_OVERRIDE_FETCH_PURGATORY_PURGE_INTERVAL_REQUESTS": "{{kafka.fetch_purgatory_purge_interval_requests}}", + "KAFKA_OVERRIDE_LOG_RETENTION_CHECK_INTERVAL_MS": "{{kafka.log_retention_check_interval_ms}}", + "KAFKA_OVERRIDE_LOG_INDEX_INTERVAL_BYTES": "{{kafka.log_index_interval_bytes}}", + "KAFKA_OVERRIDE_NUM_NETWORK_THREADS": "{{kafka.num_network_threads}}", + "KAFKA_OVERRIDE_OFFSETS_COMMIT_TIMEOUT_MS": "{{kafka.offsets_commit_timeout_ms}}", + "KAFKA_OVERRIDE_OFFSETS_TOPIC_REPLICATION_FACTOR": "{{kafka.offsets_topic_replication_factor}}", + "KAFKA_OVERRIDE_REPLICA_FETCH_MAX_BYTES": "{{kafka.replica_fetch_max_bytes}}", + "KAFKA_OVERRIDE_CONNECTIONS_MAX_IDLE_MS": "{{kafka.connections_max_idle_ms}}", + 
"KAFKA_OVERRIDE_SOCKET_REQUEST_MAX_BYTES": "{{kafka.socket_request_max_bytes}}", + "KAFKA_OVERRIDE_METRICS_SAMPLE_WINDOW_MS": "{{kafka.metrics_sample_window_ms}}", + "KAFKA_OVERRIDE_NUM_PARTITIONS": "{{kafka.num_partitions}}", + "KAFKA_OVERRIDE_REPLICA_LAG_TIME_MAX_MS": "{{kafka.replica_lag_time_max_ms}}", + "KAFKA_OVERRIDE_LOG_CLEANER_IO_BUFFER_LOAD_FACTOR": "{{kafka.log_cleaner_io_buffer_load_factor}}", + "KAFKA_OVERRIDE_OFFSETS_COMMIT_REQUIRED_ACKS": "{{kafka.offsets_commit_required_acks}}", + "KAFKA_OVERRIDE_AUTO_CREATE_TOPICS_ENABLE": "{{kafka.auto_create_topics_enable}}", + "KAFKA_OVERRIDE_UNCLEAN_LEADER_ELECTION_ENABLE": "{{kafka.unclean_leader_election_enable}}", + "KAFKA_OVERRIDE_REPLICA_FETCH_BACKOFF_MS": "{{kafka.replica_fetch_backoff_ms}}", + "KAFKA_OVERRIDE_LOG_ROLL_HOURS": "{{kafka.log_roll_hours}}", + "KAFKA_OVERRIDE_ZOOKEEPER_SESSION_TIMEOUT_MS": "{{kafka.zookeeper_session_timeout_ms}}", + "KAFKA_OVERRIDE_PRODUCER_PURGATORY_PURGE_INTERVAL_REQUESTS": "{{kafka.producer_purgatory_purge_interval_requests}}", + "KAFKA_OVERRIDE_GROUP_MIN_SESSION_TIMEOUT_MS": "{{kafka.group_min_session_timeout_ms}}", + "KAFKA_OVERRIDE_LOG_INDEX_SIZE_MAX_BYTES": "{{kafka.log_index_size_max_bytes}}", + "KAFKA_OVERRIDE_NUM_REPLICA_FETCHERS": "{{kafka.num_replica_fetchers}}", + "KAFKA_OVERRIDE_MIN_INSYNC_REPLICAS": "{{kafka.min_insync_replicas}}", + "KAFKA_OVERRIDE_LOG_FLUSH_INTERVAL_MESSAGES": "{{kafka.log_flush_interval_messages}}", + "KAFKA_OVERRIDE_SOCKET_SEND_BUFFER_BYTES": "{{kafka.socket_send_buffer_bytes}}", + "KAFKA_OVERRIDE_AUTO_LEADER_REBALANCE_ENABLE": "{{kafka.auto_leader_rebalance_enable}}", + "KAFKA_OVERRIDE_LOG_CLEANER_ENABLE": "{{kafka.log_cleaner_enable}}", + "KAFKA_OVERRIDE_QUEUED_MAX_REQUESTS": "{{kafka.queued_max_requests}}", + "KAFKA_OVERRIDE_CONTROLLED_SHUTDOWN_MAX_RETRIES": "{{kafka.controlled_shutdown_max_retries}}", + "KAFKA_OVERRIDE_OFFSETS_LOAD_BUFFER_SIZE": "{{kafka.offsets_load_buffer_size}}", + "KAFKA_OVERRIDE_LOG_RETENTION_BYTES": "{{kafka.log_retention_bytes}}", + "KAFKA_OVERRIDE_NUM_IO_THREADS": "{{kafka.num_io_threads}}", + "KAFKA_OVERRIDE_CONTROLLER_SOCKET_TIMEOUT_MS": "{{kafka.controller_socket_timeout_ms}}", + "KAFKA_OVERRIDE_LOG_RETENTION_HOURS": "{{kafka.log_retention_hours}}", + "KAFKA_OVERRIDE_LOG_FLUSH_SCHEDULER_INTERVAL_MS": "{{kafka.log_flush_scheduler_interval_ms}}", + "KAFKA_OVERRIDE_OFFSETS_RETENTION_MINUTES": "{{kafka.offsets_retention_minutes}}", + "KAFKA_OVERRIDE_QUOTA_WINDOW_SIZE_SECONDS": "{{kafka.quota_window_size_seconds}}", + "KAFKA_OVERRIDE_LOG_SEGMENT_BYTES": "{{kafka.log_segment_bytes}}", + "KAFKA_OVERRIDE_LEADER_IMBALANCE_PER_BROKER_PERCENTAGE": "{{kafka.leader_imbalance_per_broker_percentage}}", + "KAFKA_OVERRIDE_MAX_CONNECTIONS_PER_IP": "{{kafka.max_connections_per_ip}}", + "KAFKA_OVERRIDE_LOG_CLEANER_DEDUPE_BUFFER_SIZE": "{{kafka.log_cleaner_dedupe_buffer_size}}", + "KAFKA_OVERRIDE_LOG_CLEANER_MIN_CLEANABLE_RATIO": "{{kafka.log_cleaner_min_cleanable_ratio}}", + "KAFKA_OVERRIDE_ZOOKEEPER_SYNC_TIME_MS": "{{kafka.zookeeper_sync_time_ms}}", + "KAFKA_OVERRIDE_QUOTA_CONSUMER_DEFAULT": "{{kafka.quota_consumer_default}}", + "KAFKA_OVERRIDE_DELETE_TOPIC_ENABLE": "{{kafka.delete_topic_enable}}", + "KAFKA_OVERRIDE_LOG_CLEANUP_POLICY": "{{kafka.log_cleanup_policy}}", + "KAFKA_OVERRIDE_DEFAULT_REPLICATION_FACTOR": "{{kafka.default_replication_factor}}", + "KAFKA_OVERRIDE_NUM_RECOVERY_THREADS_PER_DATA_DIR": "{{kafka.num_recovery_threads_per_data_dir}}", + "KAFKA_OVERRIDE_LOG_CLEANER_IO_BUFFER_SIZE": "{{kafka.log_cleaner_io_buffer_size}}", + 
"KAFKA_OVERRIDE_BACKGROUND_THREADS": "{{kafka.background_threads}}", + "KAFKA_OVERRIDE_LOG_SEGMENT_DELETE_DELAY_MS": "{{kafka.log_segment_delete_delay_ms}}", + "KAFKA_OVERRIDE_QUOTA_WINDOW_NUM": "{{kafka.quota_window_num}}", + "KAFKA_OVERRIDE_REQUEST_TIMEOUT_MS": "{{kafka.request_timeout_ms}}", + "KAFKA_OVERRIDE_LOG_CLEANER_THREADS": "{{kafka.log_cleaner_threads}}", + "KAFKA_OVERRIDE_QUOTA_PRODUCER_DEFAULT": "{{kafka.quota_producer_default}}", + "KAFKA_OVERRIDE_LOG_CLEANER_BACKOFF_MS": "{{kafka.log_cleaner_backoff_ms}}", + "KAFKA_OVERRIDE_CONTROLLED_SHUTDOWN_ENABLE": "{{kafka.controlled_shutdown_enable}}", + "KAFKA_OVERRIDE_SOCKET_RECEIVE_BUFFER_BYTES": "{{kafka.socket_receive_buffer_bytes}}", + "KAFKA_OVERRIDE_REPLICA_FETCH_WAIT_MAX_MS": "{{kafka.replica_fetch_wait_max_ms}}", + "KAFKA_OVERRIDE_REPLICA_HIGH_WATERMARK_CHECKPOINT_INTERVAL_MS": "{{kafka.replica_high_watermark_checkpoint_interval_ms}}", + "KAFKA_OVERRIDE_OFFSETS_TOPIC_SEGMENT_BYTES": "{{kafka.offsets_topic_segment_bytes}}", + "KAFKA_OVERRIDE_LOG_CLEANER_IO_MAX_BYTES_PER_SECOND": "{{kafka.log_cleaner_io_max_bytes_per_second}}", + "KAFKA_OVERRIDE_COMPRESSION_TYPE": "{{kafka.compression_type}}" + {{#service.secret_name}} + ,"DCOS_SERVICE_ACCOUNT_CREDENTIAL": { "secret": "serviceCredential" }, + "MESOS_MODULES": "{\"libraries\": [{\"file\": \"libdcos_security.so\", \"modules\": [{\"name\": \"com_mesosphere_dcos_ClassicRPCAuthenticatee\"}]}]}", + "MESOS_AUTHENTICATEE": "com_mesosphere_dcos_ClassicRPCAuthenticatee" + {{/service.secret_name}} + }, + {{#service.secret_name}} + "secrets": { + "serviceCredential": { + "source": "{{service.secret_name}}" + } + }, + {{/service.secret_name}} + "uris": [ + "{{resource.assets.uris.jre-tar-gz}}", + "{{resource.assets.uris.scheduler-zip}}", + "{{resource.assets.uris.kafka_tgz}}", + "{{resource.assets.uris.libmesos-bundle-tar-gz}}" + ], + "healthChecks": [ + { + "gracePeriodSeconds": 120, + "intervalSeconds": 30, + "maxConsecutiveFailures": 2, + "path": "/admin/healthcheck", + "portIndex": 0, + "protocol": "HTTP", + "timeoutSeconds": 10 + } + ], + "readinessChecks": [ + { + "name": "confluentUpdateProgress", + "protocol": "HTTP", + "path": "/v1/plan", + "portName": "api", + "interval": 10000, + "timeout": 10000, + "httpStatusCodesForReady": [200], + "preserveLastResponse": true + } + ], + "upgradeStrategy":{ + "minimumHealthCapacity": 0, + "maximumOverCapacity": 0 + }, + "portDefinitions": [ + { + "port": 0, + "protocol": "tcp", + "name": "health", + "labels": {} + }, + { + "port": 0, + "protocol": "tcp", + "name": "api", + "labels": {} + } + ] +} diff --git a/repo/packages/C/confluent-kafka/18/package.json b/repo/packages/C/confluent-kafka/18/package.json new file mode 100644 index 000000000..e8e271ea8 --- /dev/null +++ b/repo/packages/C/confluent-kafka/18/package.json @@ -0,0 +1,19 @@ +{ + "packagingVersion": "3.0", + "name": "confluent-kafka", + "version": "1.1.19.1-3.2.2", + "minDcosReleaseVersion": "1.7", + "maintainer": "partner-support@confluent.io", + "description": "Apache Kafka by Confluent\n\n\tDocumentation: https://www.confluent.io/whitepaper/deploying-confluent-platform-with-mesosphere", + "selected": true, + "framework": true, + "tags": ["message", "broker", "pubsub", "kafka", "confluent"], + "postInstallNotes": "Apache Kafka by Confluent is being installed.\n\n\tDocumentation: https://www.confluent.io/whitepaper/deploying-confluent-platform-with-mesosphere\n\tCommunity Support: https://groups.google.com/forum/#!forum/confluent-platform", + "postUninstallNotes": "Apache Kafka by 
Confluent has been uninstalled.\nPlease follow the instructions at https://docs.mesosphere.com/current/usage/service-guides/kafka/uninstall to remove any persistent state if required.", + "licenses": [ + { + "name": "Apache License v2", + "url": "https://raw.githubusercontent.com/confluentinc/kafka/trunk/LICENSE" + } + ] +} diff --git a/repo/packages/C/confluent-kafka/18/resource.json b/repo/packages/C/confluent-kafka/18/resource.json new file mode 100644 index 000000000..5aa63fff3 --- /dev/null +++ b/repo/packages/C/confluent-kafka/18/resource.json @@ -0,0 +1,42 @@ +{ + "assets": { + "uris": { + "jre-tar-gz": "https://downloads.mesosphere.com/java/jre-8u121-linux-x64.tar.gz", + "kafka_tgz": "https://packages.confluent.io/archive/3.2/confluent-3.2.2-mesos.tgz", + "scheduler-zip": "https://downloads.mesosphere.com/kafka/assets/1.1.19-0.10.1.0/scheduler.zip", + "executor-zip": "https://downloads.mesosphere.com/kafka/assets/1.1.19-0.10.1.0/executor.zip", + "overrider-zip": "https://downloads.mesosphere.com/kafka/assets/1.1.19-0.10.1.0/overrider.zip", + "libmesos-bundle-tar-gz": "https://downloads.mesosphere.com/libmesos-bundle/libmesos-bundle-1.9-argus-1.1.x-3.tar.gz" + } + }, + "images": { + "icon-small": "https://downloads.mesosphere.com/universe/assets/icon-service-kafka-small.png", + "icon-medium": "https://downloads.mesosphere.com/universe/assets/icon-service-kafka-medium.png", + "icon-large": "https://downloads.mesosphere.com/universe/assets/icon-service-kafka-large.png" + }, + "cli":{ + "binaries":{ + "darwin":{ + "x86-64":{ + "contentHash":[ { "algo":"sha256", "value":"660f143852b6d3d2b0b2f787fab9e5e2927f4440502a89c9e1c08488cdacc34f" } ], + "kind":"executable", + "url":"https://downloads.mesosphere.com/kafka/assets/1.1.19-0.10.1.0/dcos-kafka-darwin" + } + }, + "linux":{ + "x86-64":{ + "contentHash":[ { "algo":"sha256", "value":"0b26409c9760fd4d75fca1114d01f25ec069147d578d6f337c88424ebc64c808" } ], + "kind":"executable", + "url":"https://downloads.mesosphere.com/kafka/assets/1.1.19-0.10.1.0/dcos-kafka-linux" + } + }, + "windows":{ + "x86-64":{ + "contentHash":[ { "algo":"sha256", "value":"60d6f802412b94833cf8db217af162e7dc35c0f5e0a1eb4b587268c361500c3b" } ], + "kind":"executable", + "url":"https://downloads.mesosphere.com/kafka/assets/1.1.19-0.10.1.0/dcos-kafka.exe" + } + } + } + } +} diff --git a/repo/packages/C/confluent-replicator/4/config.json b/repo/packages/C/confluent-replicator/4/config.json new file mode 100644 index 000000000..434dbd00d --- /dev/null +++ b/repo/packages/C/confluent-replicator/4/config.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/schema#", + "properties": { + "connect": { + "properties": { + "name": { + "default": "replicator", + "description": "Service name for the Confluent Enterprise Replicator", + "type": "string" + }, + "instances": { + "default": 1, + "description": "Number of instances to run.", + "minimum": 1, + "type": "integer" + }, + "cpus": { + "default": 2, + "description": "CPU shares to allocate to each connect worker instance.", + "minimum": 1, + "type": "number" + }, + "mem": { + "default": 1024, + "description": "Memory (MB) to allocate to each connect worker instance.", + "minimum": 512, + "type": "number" + }, + "heap": { + "default": 768, + "description": "JVM heap allocation (in MB) for connect worker task; should be ~256MB less than total memory for the instance.", + "minimum": 256, + "type": "number" + }, + "role": { + "default": "*", + "description": "Deploy connect worker only on nodes with this role.", + "type": 
"string" + }, + "kafka-service": { + "default": "confluent-kafka", + "description": "Target Apache Kafka by Confluent service to which these tasks will connect. ", + "type": "string" + }, + "zookeeper-connect": { + "default": "master.mesos:2181/dcos-service-confluent-kafka", + "description": "Zookeeper Connect string for service cluster. Format is comma-separated list of :/", + "type": "string" + }, + "schema-registry-service": { + "default": "schema-registry", + "description": "Schema Registry service to be used by connect workers. The named VIP associated with this service will be used to specify the converter-schema-registry-url's", + + "type": "string" + } + }, + "required": ["cpus", "mem", "instances", "name"], + "type": "object" + } + }, + "type": "object" +} diff --git a/repo/packages/C/confluent-replicator/4/marathon.json.mustache b/repo/packages/C/confluent-replicator/4/marathon.json.mustache new file mode 100644 index 000000000..b55d6b785 --- /dev/null +++ b/repo/packages/C/confluent-replicator/4/marathon.json.mustache @@ -0,0 +1,67 @@ +{ + "id": "/{{connect.name}}", + "instances": {{connect.instances}}, + "cpus": {{connect.cpus}}, + "mem": {{connect.mem}}, + "maintainer": "partner-support@confluent.io", + "container": { + "type": "DOCKER", + "docker": { + "image": "{{resource.assets.container.docker.image}}", + "forcePullImage": true, + "network": "BRIDGE", + "portMappings": [ { + "containerPort": 8083, + "hostPort": 0, + "protocol": "tcp", + "labels": { + "VIP_0": "{{connect.name}}:8083" + } + } ] + } + }, + "portDefinitions": [ { + "name": "{{connect.name}}", + "port": 8083, + "protocol": "tcp", + "labels": { + "VIP_0": "{{connect.name}}:8083" + } + } ], + "env": { + "CONNECT_BOOTSTRAP_SERVERS": "broker.{{connect.kafka-service}}.l4lb.thisdcos.directory:9092", + "CONNECT_REST_PORT": "8083", + "CONNECT_GROUP_ID": "dcos-{{connect.name}}-group", + "CONNECT_CONFIG_STORAGE_TOPIC": "dcos-{{connect.name}}-configs", + "CONNECT_OFFSET_STORAGE_TOPIC": "dcos-{{connect.name}}-offsets", + "CONNECT_STATUS_STORAGE_TOPIC": "dcos-{{connect.name}}-status", + "CONNECT_KEY_CONVERTER" : "io.confluent.connect.replicator.util.ByteArrayConverter", + "CONNECT_VALUE_CONVERTER" : "io.confluent.connect.replicator.util.ByteArrayConverter", + "CONNECT_INTERNAL_KEY_CONVERTER" : "org.apache.kafka.connect.json.JsonConverter", + "CONNECT_INTERNAL_VALUE_CONVERTER" : "org.apache.kafka.connect.json.JsonConverter", + "CONNECT_PRODUCER_INTERCEPTOR_CLASSES" : "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor", + "CONNECT_CONSUMER_INTERCEPTOR_CLASSES" : "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor", + "CONNECT_ZOOKEEPER_CONNECT": "{{connect.zookeeper-connect}}", + "KAFKA_HEAP_OPTS": "-Xmx{{connect.heap}}M" + }, + "healthChecks": [ + { + "protocol": "HTTP", + "portIndex": 0, + "path": "/", + "gracePeriodSeconds": 60, + "intervalSeconds": 60, + "timeoutSeconds": 20, + "maxConsecutiveFailures": 3, + "ignoreHttp1xx": false + } + ], + "acceptedResourceRoles": [ + "{{connect.role}}" + ], + "labels": { + "DCOS_SERVICE_NAME": "{{connect.name}}", + "DCOS_SERVICE_SCHEME": "http", + "DCOS_SERVICE_PORT_INDEX": "0" + } +} diff --git a/repo/packages/C/confluent-replicator/4/package.json b/repo/packages/C/confluent-replicator/4/package.json new file mode 100644 index 000000000..4b8010704 --- /dev/null +++ b/repo/packages/C/confluent-replicator/4/package.json @@ -0,0 +1,19 @@ +{ + "packagingVersion": "3.0", + "name": "confluent-replicator", + "version": "1.0.0-3.2.2", + "scm": 
"https://github.com/confluentinc/kafka", + "description": "Confluent Enterprise Replicator for multi-data center synchronization of topic data\n\n\tDocumentation: http://docs.confluent.io/3.2.2/connect/connect-replicator/docs/index.html", + "maintainer": "partner-support@confluent.io", + "tags": ["kafka", "confluent", "connect", "replicator"], + "preInstallNotes": "This DC/OS Service is currently in preview. Preparing to install Replicator service", + "postInstallNotes": "Replicator service has been installed.", + "postUninstallNotes": "Replicator service was uninstalled successfully.", + "minDcosReleaseVersion" : "1.8", + "licenses": [ + { + "name": "Apache License v2", + "url": "https://www.apache.org/licenses/LICENSE-2.0" + } + ] +} diff --git a/repo/packages/C/confluent-replicator/4/resource.json b/repo/packages/C/confluent-replicator/4/resource.json new file mode 100644 index 000000000..8dde6d4a9 --- /dev/null +++ b/repo/packages/C/confluent-replicator/4/resource.json @@ -0,0 +1,14 @@ +{ + "assets": { + "container": { + "docker": { + "image": "confluentinc/cp-enterprise-replicator:3.2.2" + } + } + }, + "images": { + "icon-small": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_small.png", + "icon-medium": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_medium.png", + "icon-large": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_large.png" + } +} diff --git a/repo/packages/C/confluent-rest-proxy/7/config.json b/repo/packages/C/confluent-rest-proxy/7/config.json new file mode 100644 index 000000000..798c36fac --- /dev/null +++ b/repo/packages/C/confluent-rest-proxy/7/config.json @@ -0,0 +1,62 @@ +{ + "$schema": "http://json-schema.org/schema#", + "properties": { + "proxy": { + "properties": { + "name": { + "default": "rest-proxy", + "description": "Service name for the rest-proxy instance(s)", + "type": "string" + }, + "instances": { + "default": 1, + "description": "Number of instances to run.", + "minimum": 1, + "type": "integer" + }, + "cpus": { + "default": 2, + "description": "CPU shares to allocate to each rest-proxy instance.", + "minimum": 2, + "type": "number" + }, + "mem": { + "default": 1024, + "description": "Memory (MB) to allocate to each rest-proxy instance.", + "minimum": 512, + "type": "number" + }, + "heap": { + "default": 768, + "description": "JVM heap allocation (in MB) for rest-proxy task; should be ~256MB less than total memory for the instance.", + "minimum": 256, + "type": "number" + }, + "role": { + "default": "*", + "description": "Deploy rest-proxy only on nodes with this role.", + "type": "string" + }, + "kafka-service": { + "default": "confluent-kafka", + "description": "Target Apache Kafka by Confluent service to which these tasks will connect. ", + "type": "string" + }, + "zookeeper-connect": { + "default": "master.mesos:2181/dcos-service-confluent-kafka", + "description": "Zookeeper Connect string for service cluster. Format is comma-separated list of :/", + "type": "string" + }, + "schema-registry-service": { + "default": "schema-registry", + "description": "Schema Registry service to be used by REST Proxy workers. 
The named VIP associated with this service will be used to specify the schema-registry-url setting.", + "type": "string" + } + }, + "required": ["cpus", "mem", "instances", "name"], + "type": "object" + } + }, + "type": "object" +} diff --git a/repo/packages/C/confluent-rest-proxy/7/marathon.json.mustache b/repo/packages/C/confluent-rest-proxy/7/marathon.json.mustache new file mode 100644 index 000000000..8a0effb4a --- /dev/null +++ b/repo/packages/C/confluent-rest-proxy/7/marathon.json.mustache @@ -0,0 +1,57 @@ +{ + "id": "/{{proxy.name}}", + "instances": {{proxy.instances}}, + "cpus": {{proxy.cpus}}, + "mem": {{proxy.mem}}, + "maintainer": "partner-support@confluent.io", + "container": { + "type": "DOCKER", + "docker": { + "image": "{{resource.assets.container.docker.image}}", + "forcePullImage": true, + "network": "BRIDGE", + "portMappings": [ { + "containerPort": 8082, + "hostPort": 0, + "protocol": "tcp", + "labels": { + "VIP_0": "{{proxy.name}}:8082" + } + } ] + } + }, + "portDefinitions": [ { + "name": "{{proxy.name}}", + "port": 8082, + "protocol": "tcp", + "labels": { + "VIP_0": "{{proxy.name}}:8082" + } + } ], + "env": { + "KAFKAREST_HEAP_OPTS": "-Xmx{{proxy.heap}}M", + "KAFKA_REST_BOOTSTRAP_SERVERS": "broker.{{proxy.kafka-service}}.l4lb.thisdcos.directory:9092", + "KAFKA_REST_ZOOKEEPER_CONNECT": "{{proxy.zookeeper-connect}}", + "KAFKA_REST_SCHEMA_REGISTRY_URL": "http://{{proxy.schema-registry-service}}.marathon.l4lb.thisdcos.directory:8081" + }, + "healthChecks": [ + { + "protocol": "HTTP", + "portIndex": 0, + "path": "/", + "gracePeriodSeconds": 60, + "intervalSeconds": 60, + "timeoutSeconds": 20, + "maxConsecutiveFailures": 3, + "ignoreHttp1xx": false + } + ], + "acceptedResourceRoles": [ + "{{proxy.role}}" + ], + "labels": { + "DCOS_SERVICE_NAME": "{{proxy.name}}", + "DCOS_SERVICE_SCHEME": "http", + "DCOS_SERVICE_PORT_INDEX": "0" + } +} diff --git a/repo/packages/C/confluent-rest-proxy/7/package.json b/repo/packages/C/confluent-rest-proxy/7/package.json new file mode 100644 index 000000000..31c54edc1 --- /dev/null +++ b/repo/packages/C/confluent-rest-proxy/7/package.json @@ -0,0 +1,19 @@ +{ + "packagingVersion": "3.0", + "name": "confluent-rest-proxy", + "version": "1.0.0-3.2.2", + "scm": "https://github.com/confluentinc/kafka-rest", + "description": "Confluent REST Proxy service\n\n\tDocumentation: http://docs.confluent.io/3.2.2/kafka-rest/docs/api.html", + "maintainer": "partner-support@confluent.io", + "tags": ["kafka", "confluent", "proxy", "rest"], + "preInstallNotes": "This DC/OS Service is currently in preview. 
Preparing to install confluent-rest-proxy", + "postInstallNotes": "confluent-rest-proxy has been installed.", + "postUninstallNotes": "confluent-rest-proxy was uninstalled successfully.", + "minDcosReleaseVersion" : "1.8", + "licenses": [ + { + "name": "Apache License v2", + "url": "https://raw.githubusercontent.com/confluentinc/kafka-rest/master/LICENSE" + } + ] +} diff --git a/repo/packages/C/confluent-rest-proxy/7/resource.json b/repo/packages/C/confluent-rest-proxy/7/resource.json new file mode 100644 index 000000000..1b64f3c9f --- /dev/null +++ b/repo/packages/C/confluent-rest-proxy/7/resource.json @@ -0,0 +1,14 @@ +{ + "assets": { + "container": { + "docker": { + "image": "confluentinc/cp-kafka-rest:3.2.2" + } + } + }, + "images": { + "icon-small": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_small.png", + "icon-medium": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_medium.png", + "icon-large": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_large.png" + } +} diff --git a/repo/packages/C/confluent-schema-registry/8/config.json b/repo/packages/C/confluent-schema-registry/8/config.json new file mode 100644 index 000000000..a5f43e3be --- /dev/null +++ b/repo/packages/C/confluent-schema-registry/8/config.json @@ -0,0 +1,57 @@ +{ + "$schema": "http://json-schema.org/schema#", + "properties": { + "registry": { + "properties": { + "name": { + "default": "schema-registry", + "description": "Service name for the schema-registry instance(s)", + "type": "string" + }, + "instances": { + "default": 1, + "description": "Number of instances to run (currently limited to 1).", + "minimum": 1, + "maximum": 1, + "type": "integer" + }, + "cpus": { + "default": 1, + "description": "CPU shares to allocate to each schema-registry instance.", + "minimum": 1, + "type": "number" + }, + "mem": { + "default": 512, + "description": "Memory (MB) to allocate to each schema-registry instance.", + "minimum": 320, + "type": "number" + }, + "heap": { + "default": 256, + "description": "JVM heap allocation (in MB) for the schema-registry task; should be at least ~64MB less than the total memory for the instance.", + "minimum": 256, + "type": "number" + }, + "role": { + "default": "*", + "description": "Deploy schema-registry only on nodes with this role.", + "type": "string" + }, + "zookeeper-master": { + "default": "master.mesos:2181", + "description": "Zookeeper Connect string for the service cluster. 
Format is limited to a single target: host:port.", + "type": "string" + }, + "kafkastore": { + "default": "dcos-service-confluent-kafka", + "description": "Name of the Kafka service hosting the storage for this Schema Registry instance.", + "type": "string" + } + }, + "required": ["cpus", "mem", "instances", "name"], + "type": "object" + } + }, + "type": "object" +} diff --git a/repo/packages/C/confluent-schema-registry/8/marathon.json.mustache b/repo/packages/C/confluent-schema-registry/8/marathon.json.mustache new file mode 100644 index 000000000..f09ebbebe --- /dev/null +++ b/repo/packages/C/confluent-schema-registry/8/marathon.json.mustache @@ -0,0 +1,56 @@ +{ + "id": "/{{registry.name}}", + "instances": {{registry.instances}}, + "cpus": {{registry.cpus}}, + "mem": {{registry.mem}}, + "maintainer": "partner-support@confluent.io", + "container": { + "type": "DOCKER", + "docker": { + "image": "{{resource.assets.container.docker.image}}", + "forcePullImage": true, + "network": "BRIDGE", + "portMappings": [ { + "containerPort": 8081, + "hostPort": 0, + "protocol": "tcp", + "labels": { + "VIP_0": "{{registry.name}}:8081" + } + } ] + } + }, + "portDefinitions": [ { + "name": "{{registry.name}}", + "port": 8081, + "protocol": "tcp", + "labels": { + "VIP_0": "{{registry.name}}:8081" + } + } ], + "env": { + "SCHEMA_REGISTRY_HEAP_OPTS": "-Xmx{{registry.heap}}M", + "SCHEMA_REGISTRY_KAFKASTORE_CONNECTION_URL": "{{registry.zookeeper-master}}/{{registry.kafkastore}}", + "SCHEMA_REGISTRY_SCHEMA_REGISTRY_ZK_NAMESPACE": "{{registry.kafkastore}}/{{registry.name}}" + }, + "healthChecks": [ + { + "protocol": "HTTP", + "portIndex": 0, + "path": "/", + "gracePeriodSeconds": 60, + "intervalSeconds": 60, + "timeoutSeconds": 20, + "maxConsecutiveFailures": 3, + "ignoreHttp1xx": false + } + ], + "acceptedResourceRoles": [ + "{{registry.role}}" + ], + "labels": { + "DCOS_SERVICE_NAME": "{{registry.name}}", + "DCOS_SERVICE_SCHEME": "http", + "DCOS_SERVICE_PORT_INDEX": "0" + } +} diff --git a/repo/packages/C/confluent-schema-registry/8/package.json b/repo/packages/C/confluent-schema-registry/8/package.json new file mode 100644 index 000000000..ed13b338f --- /dev/null +++ b/repo/packages/C/confluent-schema-registry/8/package.json @@ -0,0 +1,19 @@ +{ + "packagingVersion": "3.0", + "name": "confluent-schema-registry", + "version": "1.0.0-3.2.2", + "scm": "https://github.com/confluentinc/schema-registry", + "description": "Confluent Schema Registry service\n\n\tDocumentation: http://docs.confluent.io/3.2.2/schema-registry/docs/intro.html", + "maintainer": "partner-support@confluent.io", + "tags": ["kafka", "confluent", "schema", "registry"], + "preInstallNotes": "Preparing to install confluent-schema-registry", + "postInstallNotes": "confluent-schema-registry has been installed.", + "postUninstallNotes": "confluent-schema-registry was uninstalled successfully.", + "minDcosReleaseVersion" : "1.8", + "licenses": [ + { + "name": "Apache License v2", + "url": "https://raw.githubusercontent.com/confluentinc/schema-registry/master/LICENSE" + } + ] +} diff --git a/repo/packages/C/confluent-schema-registry/8/resource.json b/repo/packages/C/confluent-schema-registry/8/resource.json new file mode 100644 index 000000000..27289a51c --- /dev/null +++ b/repo/packages/C/confluent-schema-registry/8/resource.json @@ -0,0 +1,14 @@ +{ + "assets": { + "container": { + "docker": { + "image": "confluentinc/cp-schema-registry:3.2.2" + } + } + }, + "images": { + "icon-small": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_small.png", + 
"icon-medium": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_medium.png", + "icon-large": "https://s3-us-west-2.amazonaws.com/confluent-mesos-devel/ConfIcon_large.png" + } +}