From a0089d2427c6ed6c10383baadf552186c13b72e0 Mon Sep 17 00:00:00 2001 From: Rob Zienert Date: Fri, 15 Sep 2017 14:39:12 -0700 Subject: [PATCH] feat(pipeline_template): Convert to pipeline template endpoint (#1615) --- .../orca-pipelinetemplate.gradle | 1 + .../converter/PipelineTemplateConverter.java | 171 ++++++++++++++ .../main/resources/pipelineTemplateHeader.txt | 18 ++ .../PipelineTemplateConverterSpec.groovy | 36 +++ .../resources/convertedPipelineTemplate.yml | 199 ++++++++++++++++ .../convertedPipelineTemplateSource.json | 222 ++++++++++++++++++ .../PipelineTemplateController.groovy | 7 + 7 files changed, 654 insertions(+) create mode 100644 orca-pipelinetemplate/src/main/java/com/netflix/spinnaker/orca/pipelinetemplate/v1schema/converter/PipelineTemplateConverter.java create mode 100644 orca-pipelinetemplate/src/main/resources/pipelineTemplateHeader.txt create mode 100644 orca-pipelinetemplate/src/test/groovy/com/netflix/spinnaker/orca/pipelinetemplate/v1schema/converter/PipelineTemplateConverterSpec.groovy create mode 100644 orca-pipelinetemplate/src/test/resources/convertedPipelineTemplate.yml create mode 100644 orca-pipelinetemplate/src/test/resources/convertedPipelineTemplateSource.json diff --git a/orca-pipelinetemplate/orca-pipelinetemplate.gradle b/orca-pipelinetemplate/orca-pipelinetemplate.gradle index ddcd574b23..6d02ee4f55 100644 --- a/orca-pipelinetemplate/orca-pipelinetemplate.gradle +++ b/orca-pipelinetemplate/orca-pipelinetemplate.gradle @@ -20,4 +20,5 @@ dependencies { testCompile spinnaker.dependency("slf4jSimple") testCompile 'org.spockframework:spock-unitils:1.1-groovy-2.4-rc-2' + testCompile 'org.codehaus.groovy:groovy-json:2.4.11' } diff --git a/orca-pipelinetemplate/src/main/java/com/netflix/spinnaker/orca/pipelinetemplate/v1schema/converter/PipelineTemplateConverter.java b/orca-pipelinetemplate/src/main/java/com/netflix/spinnaker/orca/pipelinetemplate/v1schema/converter/PipelineTemplateConverter.java new file mode 100644 index 
0000000000..f57fea682d --- /dev/null +++ b/orca-pipelinetemplate/src/main/java/com/netflix/spinnaker/orca/pipelinetemplate/v1schema/converter/PipelineTemplateConverter.java @@ -0,0 +1,171 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package com.netflix.spinnaker.orca.pipelinetemplate.v1schema.converter; + +import com.google.common.base.Charsets; +import com.google.common.io.Files; +import com.google.common.io.Resources; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.Yaml; +import org.yaml.snakeyaml.representer.Representer; + +import java.io.File; +import java.io.IOException; +import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; + +// Who needs type-checking anyway? 
+@SuppressWarnings("unchecked") +public class PipelineTemplateConverter { + + private final static Logger log = LoggerFactory.getLogger(PipelineTemplateConverter.class); + + public String convertToPipelineTemplate(Map pipeline) { + Map p = new LinkedHashMap<>(); + p.put("schema", "1"); + p.put("id", String.format("%s-%s", pipeline.getOrDefault("application", "spinnaker"), ((String) pipeline.getOrDefault("name", "generatedTemplate")).replaceAll("\\W", ""))); + p.put("metadata", generateMetadata(pipeline)); + p.put("protect", false); + p.put("configuration", generateConfiguration(pipeline)); + p.put("variables", new ArrayList<>()); + p.put("stages", convertStages((List) pipeline.get("stages"))); + + Representer representer = new Representer(); + DumperOptions options = new DumperOptions(); + options.setIndent(2); + options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK); + Yaml yaml = new Yaml(representer, options); + + String output = yaml.dump(p); + + return String.format("%s%s", loadTemplateHeader(), output); + } + + private Map generateMetadata(Map pipeline) { + Map m = new LinkedHashMap<>(); + m.put("name", pipeline.getOrDefault("name", "GIVE ME A NAME")); + m.put("description", pipeline.getOrDefault("description", "GIVE ME A DESCRIPTION")); + m.put("owner", pipeline.get("lastModifiedBy")); + m.put("scopes", (pipeline.get("application") == null) ? 
new ArrayList<>() : Collections.singletonList(pipeline.get("application"))); + return m; + } + + private Map generateConfiguration(Map pipeline) { + Map m = new LinkedHashMap<>(); + Map cm = new LinkedHashMap<>(); + cm.put("limitConcurrent", true); + m.put("concurrentExecutions", cm); + m.put("triggers", convertTriggers((List) pipeline.get("triggers"))); + m.put("parameters", pipeline.getOrDefault("parameterConfig", new ArrayList<>())); + m.put("notifications", convertNotifications((List) pipeline.get("notifications"))); + return m; + } + + private List> convertStages(List> stages) { + return stages.stream() + .map(s -> { + List dependsOn = new ArrayList<>(); + if (s.containsKey("requisiteStageRefIds") && !((List) s.get("requisiteStageRefIds")).isEmpty()) { + dependsOn = buildStageRefIds(stages, (List) s.get("requisiteStageRefIds")); + } + + Map stage = new LinkedHashMap<>(); + stage.put("id", getStageId((String) s.get("type"), (String) s.get("refId"))); + stage.put("type", s.get("type")); + stage.put("dependsOn", dependsOn); + stage.put("name", s.get("name")); + stage.put("config", scrubStageConfig(s)); + return stage; + }) + .collect(Collectors.toList()); + } + + private static Map scrubStageConfig(Map config) { + Map working = new LinkedHashMap<>(config); + working.remove("type"); + working.remove("name"); + working.remove("refId"); + working.remove("requisiteStageRefIds"); + return working; + } + + private static List buildStageRefIds(List> stages, List requisiteStageRefIds) { + List refIds = new ArrayList<>(); + for (String refId : requisiteStageRefIds) { + Optional stage = stages.stream() + .filter(s -> refId.equals(s.get("refId"))) + .map(s -> getStageId((String) s.get("type"), (String) s.get("refId"))) + .findFirst(); + stage.ifPresent(refIds::add); + } + return refIds; + } + + private static String getStageId(String type, String refId) { + return String.format("%s%s", type, refId); + } + + private List> convertTriggers(List> triggers) { + if (triggers == 
null) { + return Collections.emptyList(); + } + + List> ret = new ArrayList<>(triggers.size()); + + int i = 0; + for (Map trigger : triggers) { + trigger.put("name", String.format("unnamed%d", i)); + i++; + ret.add(trigger); + } + + return ret; + } + + private List> convertNotifications(List> notifications) { + if (notifications == null) { + return Collections.emptyList(); + } + + List> ret = new ArrayList<>(notifications.size()); + + int i = 0; + for (Map notification : notifications) { + notification.put("name", String.format("%s%d", notification.get("type"), i)); + i++; + ret.add(notification); + } + + return ret; + } + + private String loadTemplateHeader() { + try { + return Files.toString(new File(Resources.getResource("pipelineTemplateHeader.txt").toURI()), Charsets.UTF_8); + } catch (IOException | URISyntaxException e) { + log.error("Could not load pipeline template header resource", e); + return "GENERATED BY spinnaker"; + } + } +} diff --git a/orca-pipelinetemplate/src/main/resources/pipelineTemplateHeader.txt b/orca-pipelinetemplate/src/main/resources/pipelineTemplateHeader.txt new file mode 100644 index 0000000000..e50eb2eb54 --- /dev/null +++ b/orca-pipelinetemplate/src/main/resources/pipelineTemplateHeader.txt @@ -0,0 +1,18 @@ +# GENERATED BY spinnaker +# +# The template generated below should be used as a base for further modifications. +# It does not make assumptions as to what things can be made into variables, +# modules, partials or Jinja expressions. This is your responsibility as the owner +# of the template. +# +# Some recommendations to massage the initial output: +# +# * Give your pipeline template a unique ID. It's best to namespace it by your +# application or team name, so that it does not conflict with other teams, +# e.g. "myteam-myTemplate". +# * Rename the pipeline stage IDs, notifications and trigger names to be more +# meaningful. Enumerated stage IDs are ultimately a detriment for long-term +# maintenance of your template. 
+# * Best intentions are made to order configuration, but the list of stages +# themselves are not ordered: Rearrange the stages so that they're roughly +# chronological. diff --git a/orca-pipelinetemplate/src/test/groovy/com/netflix/spinnaker/orca/pipelinetemplate/v1schema/converter/PipelineTemplateConverterSpec.groovy b/orca-pipelinetemplate/src/test/groovy/com/netflix/spinnaker/orca/pipelinetemplate/v1schema/converter/PipelineTemplateConverterSpec.groovy new file mode 100644 index 0000000000..1c83922425 --- /dev/null +++ b/orca-pipelinetemplate/src/test/groovy/com/netflix/spinnaker/orca/pipelinetemplate/v1schema/converter/PipelineTemplateConverterSpec.groovy @@ -0,0 +1,36 @@ +/* + * Copyright 2017 Netflix, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
// Golden-file test: converts a representative pipeline JSON fixture and
// compares the generated template byte-for-byte against the expected YAML.
// If the converter's output format changes intentionally, regenerate
// convertedPipelineTemplate.yml rather than hand-editing the assertion.
class PipelineTemplateConverterSpec extends Specification {

  def "should convert a pipeline to an ordered pipeline template yaml document"() {
    given:
    // Fixture paths are relative to the orca-pipelinetemplate module root,
    // which is the working directory when the module's tests run.
    def pipeline = new JsonSlurper().parse(new File("src/test/resources/convertedPipelineTemplateSource.json"))

    and:
    String expected = new File("src/test/resources/convertedPipelineTemplate.yml").text

    when:
    String result = new PipelineTemplateConverter().convertToPipelineTemplate(pipeline)

    then:
    // Exact string equality: covers the header, key ordering, and YAML style.
    expected == result
  }
}
+ owner: example@example.com + scopes: [] +protect: false +configuration: + concurrentExecutions: + limitConcurrent: true + triggers: + - enabled: false + job: ZZ-demo + master: myMaster + name: unnamed0 + type: jenkins + - cronExpression: 0 0/12 * * * ? + enabled: true + id: b2ba0819-dbe5-42bd-a8b0-0499c131711f + name: unnamed1 + type: cron + parameters: [] + notifications: + - address: example@example.com + level: pipeline + name: email0 + type: email + when: + - pipeline.failed +variables: [] +stages: +- id: bake1 + type: bake + dependsOn: [] + name: Bake + config: + baseLabel: release + baseOs: trusty + cloudProviderType: aws + enhancedNetworking: false + extendedAttributes: {} + overrideTimeout: true + package: orca + regions: + - us-east-1 + - us-west-1 + - us-west-2 + - eu-west-1 + sendNotifications: true + showAdvancedOptions: true + stageTimeoutMs: 900000 + storeType: ebs + user: example@example.com + vmType: hvm +- id: deploy2 + type: deploy + dependsOn: + - bake1 + name: Deploy + config: + clusters: + - account: test + application: spindemo + availabilityZones: + us-west-1: + - us-west-1a + - us-west-1c + capacity: + desired: 1 + max: 1 + min: 1 + cloudProvider: aws + cooldown: 10 + copySourceCustomBlockDeviceMappings: true + ebsOptimized: false + enabledMetrics: [] + freeFormDetails: demo + healthCheckGracePeriod: 600 + healthCheckType: EC2 + iamRole: myIAMRole + instanceMonitoring: false + instanceType: m3.large + interestingHealthProviderNames: + - Amazon + keyPair: keypair + loadBalancers: + - spindemo-demo-frontend + maxRemainingAsgs: 2 + preferSourceCapacity: true + provider: aws + scaleDown: true + securityGroups: [] + stack: test + strategy: redblack + subnetType: mySubnet + suspendedProcesses: [] + tags: {} + targetGroups: [] + targetHealthyDeployPercentage: 100 + terminationPolicies: + - Default + useAmiBlockDeviceMappings: false + useSourceCapacity: true + - account: test + application: spindemo + availabilityZones: + us-east-1: + - us-east-1c 
+ - us-east-1d + - us-east-1e + capacity: + desired: 0 + max: 0 + min: 0 + cloudProvider: aws + cooldown: 10 + ebsOptimized: false + freeFormDetails: demo + healthCheckGracePeriod: 600 + healthCheckType: EC2 + iamRole: myIAMRole + instanceMonitoring: false + instanceType: m3.large + interestingHealthProviderNames: + - Amazon + keyPair: keypair + provider: aws + securityGroups: [] + stack: test + strategy: highlander + subnetType: mySubnet + suspendedProcesses: [] + tags: {} + targetHealthyDeployPercentage: 100 + terminationPolicies: + - Default + useSourceCapacity: false +- id: checkPreconditions6 + type: checkPreconditions + dependsOn: + - wait8 + name: Check Preconditions (us-west-1) + config: + preconditions: + - context: + cluster: spindemo-test-demo + comparison: <= + credentials: test + expected: 2 + regions: + - us-west-1 + failPipeline: true + type: clusterSize +- id: checkPreconditions7 + type: checkPreconditions + dependsOn: + - wait8 + name: Check Preconditions (us-east-1) + config: + completeOtherBranchesThenFail: false + continuePipeline: false + failPipeline: true + preconditions: + - context: + cluster: spindemo-test-demo + comparison: == + credentials: test + expected: 1 + regions: + - us-east-1 + failPipeline: true + type: clusterSize +- id: wait8 + type: wait + dependsOn: + - deploy2 + name: Wait + config: + comments: Wait 2 min for the clusters to normalize before the precondition asg size checks + waitTime: 120 diff --git a/orca-pipelinetemplate/src/test/resources/convertedPipelineTemplateSource.json b/orca-pipelinetemplate/src/test/resources/convertedPipelineTemplateSource.json new file mode 100644 index 0000000000..57fd7197d6 --- /dev/null +++ b/orca-pipelinetemplate/src/test/resources/convertedPipelineTemplateSource.json @@ -0,0 +1,222 @@ +{ + "appConfig": {}, + "description": "This is my favorite pipeline!", + "executionEngine": "v3", + "lastModifiedBy": "example@example.com", + "limitConcurrent": true, + "notifications": [ + { + "address": 
"example@example.com", + "level": "pipeline", + "type": "email", + "when": [ + "pipeline.failed" + ] + } + ], + "parallel": true, + "parameterConfig": [], + "stageCounter": 7, + "stages": [ + { + "baseLabel": "release", + "baseOs": "trusty", + "cloudProviderType": "aws", + "enhancedNetworking": false, + "extendedAttributes": {}, + "name": "Bake", + "overrideTimeout": true, + "package": "orca", + "refId": "1", + "regions": [ + "us-east-1", + "us-west-1", + "us-west-2", + "eu-west-1" + ], + "requisiteStageRefIds": [], + "sendNotifications": true, + "showAdvancedOptions": true, + "stageTimeoutMs": 900000, + "storeType": "ebs", + "type": "bake", + "user": "example@example.com", + "vmType": "hvm" + }, + { + "clusters": [ + { + "account": "test", + "application": "spindemo", + "availabilityZones": { + "us-west-1": [ + "us-west-1a", + "us-west-1c" + ] + }, + "capacity": { + "desired": 1, + "max": 1, + "min": 1 + }, + "cloudProvider": "aws", + "cooldown": 10, + "copySourceCustomBlockDeviceMappings": true, + "ebsOptimized": false, + "enabledMetrics": [], + "freeFormDetails": "demo", + "healthCheckGracePeriod": 600, + "healthCheckType": "EC2", + "iamRole": "myIAMRole", + "instanceMonitoring": false, + "instanceType": "m3.large", + "interestingHealthProviderNames": [ + "Amazon" + ], + "keyPair": "keypair", + "loadBalancers": [ + "spindemo-demo-frontend" + ], + "maxRemainingAsgs": 2, + "preferSourceCapacity": true, + "provider": "aws", + "scaleDown": true, + "securityGroups": [], + "stack": "test", + "strategy": "redblack", + "subnetType": "mySubnet", + "suspendedProcesses": [], + "tags": {}, + "targetGroups": [], + "targetHealthyDeployPercentage": 100, + "terminationPolicies": [ + "Default" + ], + "useAmiBlockDeviceMappings": false, + "useSourceCapacity": true + }, + { + "account": "test", + "application": "spindemo", + "availabilityZones": { + "us-east-1": [ + "us-east-1c", + "us-east-1d", + "us-east-1e" + ] + }, + "capacity": { + "desired": 0, + "max": 0, + "min": 0 + }, + 
"cloudProvider": "aws", + "cooldown": 10, + "ebsOptimized": false, + "freeFormDetails": "demo", + "healthCheckGracePeriod": 600, + "healthCheckType": "EC2", + "iamRole": "myIAMRole", + "instanceMonitoring": false, + "instanceType": "m3.large", + "interestingHealthProviderNames": [ + "Amazon" + ], + "keyPair": "keypair", + "provider": "aws", + "securityGroups": [], + "stack": "test", + "strategy": "highlander", + "subnetType": "mySubnet", + "suspendedProcesses": [], + "tags": {}, + "targetHealthyDeployPercentage": 100, + "terminationPolicies": [ + "Default" + ], + "useSourceCapacity": false + } + ], + "name": "Deploy", + "refId": "2", + "requisiteStageRefIds": [ + "1" + ], + "type": "deploy" + }, + { + "name": "Check Preconditions (us-west-1)", + "preconditions": [ + { + "context": { + "cluster": "spindemo-test-demo", + "comparison": "<=", + "credentials": "test", + "expected": 2, + "regions": [ + "us-west-1" + ] + }, + "failPipeline": true, + "type": "clusterSize" + } + ], + "refId": "6", + "requisiteStageRefIds": [ + "8" + ], + "type": "checkPreconditions" + }, + { + "completeOtherBranchesThenFail": false, + "continuePipeline": false, + "failPipeline": true, + "name": "Check Preconditions (us-east-1)", + "preconditions": [ + { + "context": { + "cluster": "spindemo-test-demo", + "comparison": "==", + "credentials": "test", + "expected": 1, + "regions": [ + "us-east-1" + ] + }, + "failPipeline": true, + "type": "clusterSize" + } + ], + "refId": "7", + "requisiteStageRefIds": [ + "8" + ], + "type": "checkPreconditions" + }, + { + "comments": "Wait 2 min for the clusters to normalize before the precondition asg size checks", + "name": "Wait", + "refId": "8", + "requisiteStageRefIds": [ + "2" + ], + "type": "wait", + "waitTime": 120 + } + ], + "triggers": [ + { + "enabled": false, + "job": "ZZ-demo", + "master": "myMaster", + "type": "jenkins" + }, + { + "cronExpression": "0 0/12 * * * ?", + "enabled": true, + "id": "b2ba0819-dbe5-42bd-a8b0-0499c131711f", + "type": 
"cron" + } + ], + "updateTs": "1504243528000" +} diff --git a/orca-web/src/main/groovy/com/netflix/spinnaker/orca/controllers/PipelineTemplateController.groovy b/orca-web/src/main/groovy/com/netflix/spinnaker/orca/controllers/PipelineTemplateController.groovy index 9e7c55ed2e..c45e4b3f10 100644 --- a/orca-web/src/main/groovy/com/netflix/spinnaker/orca/controllers/PipelineTemplateController.groovy +++ b/orca-web/src/main/groovy/com/netflix/spinnaker/orca/controllers/PipelineTemplateController.groovy @@ -17,11 +17,13 @@ package com.netflix.spinnaker.orca.controllers import com.netflix.spinnaker.kork.web.exceptions.InvalidRequestException import com.netflix.spinnaker.orca.pipelinetemplate.PipelineTemplateService +import com.netflix.spinnaker.orca.pipelinetemplate.v1schema.converter.PipelineTemplateConverter import com.netflix.spinnaker.orca.pipelinetemplate.v1schema.model.PipelineTemplate import com.netflix.spinnaker.orca.pipelinetemplate.v1schema.model.TemplateConfiguration.TemplateSource import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.boot.autoconfigure.condition.ConditionalOnExpression +import org.springframework.web.bind.annotation.RequestBody import org.springframework.web.bind.annotation.RequestMapping import org.springframework.web.bind.annotation.RequestMethod import org.springframework.web.bind.annotation.RequestParam @@ -43,4 +45,9 @@ class PipelineTemplateController { pipelineTemplateService.resolveTemplate(new TemplateSource(source: source)) } + + @RequestMapping(value = "/convertPipelineToTemplate", method = RequestMethod.POST, produces = 'text/x-yaml') + String convertPipelineToPipelineTemplate(@RequestBody Map pipeline) { + new PipelineTemplateConverter().convertToPipelineTemplate(pipeline) + } }