diff --git a/gradle.properties b/gradle.properties index 6c41963b4c..74edffd2a6 100644 --- a/gradle.properties +++ b/gradle.properties @@ -1,5 +1,5 @@ -fiatVersion=1.43.0 -korkVersion=7.211.0 +fiatVersion=1.44.0 +korkVersion=7.221.0 kotlinVersion=1.5.32 org.gradle.parallel=true org.gradle.jvmargs=-Xmx6g diff --git a/keiko-sql/src/main/kotlin/com/netflix/spinnaker/q/sql/SqlQueue.kt b/keiko-sql/src/main/kotlin/com/netflix/spinnaker/q/sql/SqlQueue.kt index b65e8b16f2..614bebfd75 100644 --- a/keiko-sql/src/main/kotlin/com/netflix/spinnaker/q/sql/SqlQueue.kt +++ b/keiko-sql/src/main/kotlin/com/netflix/spinnaker/q/sql/SqlQueue.kt @@ -77,7 +77,8 @@ class SqlQueue( override val publisher: EventPublisher, private val sqlRetryProperties: SqlRetryProperties, private val ULID: ULID = ULID(), - private val poolName: String = "default" + private val poolName: String = "default", + private val containsMessageBatchSize: Int = 100, ) : MonitorableQueue { companion object { @@ -187,33 +188,32 @@ class SqlQueue( } private fun doContainsMessage(predicate: (Message) -> Boolean): Boolean { - val batchSize = 100 + val batchSize = containsMessageBatchSize var found = false var lastId = "0" do { - val rs: ResultSet = withRetry(READ) { + val rs = withRetry(READ) { jooq.select(idField, fingerprintField, bodyField) .from(messagesTable) .where(idField.gt(lastId)) + .orderBy(idField.asc()) .limit(batchSize) .fetch() - .intoResultSet() } - while (!found && rs.next()) { + val rsIterator = rs.iterator() + while (!found && rsIterator.hasNext()) { + val record = rsIterator.next() + val body = record[bodyField, String::class.java] try { - found = predicate.invoke(mapper.readValue(rs.getString("body"))) + found = predicate.invoke(mapper.readValue(body)) } catch (e: Exception) { - log.error( - "Failed reading message with fingerprint: ${rs.getString("fingerprint")} " + - "message: ${rs.getString("body")}", - e - ) + log.error("Failed reading message with fingerprint: ${record[fingerprintField, String::class.java]} message: $body", e) } - lastId = rs.getString("id") + lastId = record[idField, String::class.java] } - } while (!found && rs.row == batchSize) + } while (!found && rs.isNotEmpty) return found } diff --git a/keiko-sql/src/test/kotlin/com/netflix/spinnaker/q/sql/SqlQueueTest.kt b/keiko-sql/src/test/kotlin/com/netflix/spinnaker/q/sql/SqlQueueTest.kt index 2d5f3235c0..58564687fb 100644 --- a/keiko-sql/src/test/kotlin/com/netflix/spinnaker/q/sql/SqlQueueTest.kt +++ b/keiko-sql/src/test/kotlin/com/netflix/spinnaker/q/sql/SqlQueueTest.kt @@ -15,6 +15,12 @@ import com.netflix.spinnaker.q.TestMessage import com.netflix.spinnaker.q.metrics.EventPublisher import com.netflix.spinnaker.q.metrics.MonitorableQueueTest import com.netflix.spinnaker.q.metrics.QueueEvent +import com.netflix.spinnaker.time.MutableClock +import com.nhaarman.mockito_kotlin.mock +import org.assertj.core.api.Assertions.assertThat +import org.junit.jupiter.api.AfterEach +import org.junit.jupiter.api.Test +import org.junit.jupiter.api.BeforeEach import java.time.Clock import java.time.Duration import java.util.Optional @@ -37,7 +43,8 @@ private val createQueueNoPublisher = { clock: Clock, private fun createQueue(clock: Clock, deadLetterCallback: DeadMessageCallback, - publisher: EventPublisher?): SqlQueue { + publisher: EventPublisher?, + containsMessageBatchSize: Int = 5): SqlQueue { return SqlQueue( queueName = "test", schemaVersion = 1, @@ -66,7 +73,8 @@ private fun createQueue(clock: Clock, sqlRetryProperties = SqlRetryProperties( transactions = 
retryPolicy, reads = retryPolicy - ) + ), + containsMessageBatchSize = containsMessageBatchSize, ) } @@ -78,3 +86,49 @@ private val retryPolicy: RetryProperties = RetryProperties( maxRetries = 1, backoffMs = 10 // minimum allowed ) + +class SqlQueueSpecificTests { + private val batchSize = 5 + private val clock = MutableClock() + private val deadMessageHandler: DeadMessageCallback = mock() + private val publisher: EventPublisher = mock() + private var queue: SqlQueue? = null + + @BeforeEach + fun setup() { + queue = createQueue(clock, deadMessageHandler, publisher, batchSize) + } + + @AfterEach + fun cleanup() { + cleanupCallback() + } + + @Test + fun `doContainsMessage works with no messages present`() { + assertThat(doContainsMessagePayload("test")).isFalse + } + + @Test + fun `doContainsMessage works with a single batch`() { + pushTestMessages(batchSize) + assertThat(doContainsMessagePayload("${batchSize-1}")).isTrue + assertThat(doContainsMessagePayload("")).isFalse + } + + @Test + fun `doContainsMessage handles multiple batches during search`() { + pushTestMessages(batchSize * 2) + assertThat(doContainsMessagePayload("${batchSize+1}")).isTrue + assertThat(doContainsMessagePayload("")).isFalse + } + + private fun pushTestMessages(numberOfMessages: Int) { + for (i in 1 .. numberOfMessages) { + queue?.push(TestMessage(i.toString())) + } + } + + private fun doContainsMessagePayload(payload: String): Boolean? = + queue?.containsMessage { message -> message is TestMessage && message.payload == payload } +} diff --git a/orca-applications/src/main/groovy/com/netflix/spinnaker/orca/applications/tasks/DeleteApplicationTask.groovy b/orca-applications/src/main/groovy/com/netflix/spinnaker/orca/applications/tasks/DeleteApplicationTask.groovy index f88800e15c..ae0d991f39 100644 --- a/orca-applications/src/main/groovy/com/netflix/spinnaker/orca/applications/tasks/DeleteApplicationTask.groovy +++ b/orca-applications/src/main/groovy/com/netflix/spinnaker/orca/applications/tasks/DeleteApplicationTask.groovy @@ -18,6 +18,8 @@ package com.netflix.spinnaker.orca.applications.tasks import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import com.netflix.spinnaker.orca.api.pipeline.TaskResult import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.front50.Front50Service @@ -93,6 +95,15 @@ class DeleteApplicationTask extends AbstractFront50Task { } log.error("Could not delete application", e) return TaskResult.builder(ExecutionStatus.TERMINAL).outputs(outputs).build() + } catch (SpinnakerHttpException httpException){ + if (httpException.responseCode == 404) { + return TaskResult.SUCCEEDED + } + log.error("Could not delete application", httpException) + return TaskResult.builder(ExecutionStatus.TERMINAL).outputs(outputs).build() + } catch (SpinnakerServerException serverException) { + log.error("Could not delete application", serverException) + return TaskResult.builder(ExecutionStatus.TERMINAL).outputs(outputs).build() } return TaskResult.builder(ExecutionStatus.SUCCEEDED).outputs(outputs).build() } diff --git a/orca-clouddriver/src/main/java/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/ServerGroupProperties.java b/orca-clouddriver/src/main/java/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/ServerGroupProperties.java 
new file mode 100644 index 0000000000..991e7675b5 --- /dev/null +++ b/orca-clouddriver/src/main/java/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/ServerGroupProperties.java @@ -0,0 +1,47 @@ +/* + * Copyright 2024 Harness, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.orca.clouddriver.tasks.servergroup; + +import org.springframework.boot.context.properties.ConfigurationProperties; +import org.springframework.stereotype.Component; + +@Component +@ConfigurationProperties(prefix = "server-group") +public class ServerGroupProperties { + + private Resize resize = new Resize(); + + public static class Resize { + private boolean matchInstancesSize; + + public void setMatchInstancesSize(boolean matchInstancesSize) { + this.matchInstancesSize = matchInstancesSize; + } + + public boolean isMatchInstancesSize() { + return matchInstancesSize; + } + } + + public void setResize(Resize resize) { + this.resize = resize; + } + + public Resize getResize() { + return resize; + } +} diff --git a/orca-clouddriver/src/main/groovy/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/WaitForCapacityMatchTask.java b/orca-clouddriver/src/main/java/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/WaitForCapacityMatchTask.java similarity index 70% rename from orca-clouddriver/src/main/groovy/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/WaitForCapacityMatchTask.java rename to orca-clouddriver/src/main/java/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/WaitForCapacityMatchTask.java index 9015b2cc63..d9f05d30f5 100644 --- a/orca-clouddriver/src/main/groovy/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/WaitForCapacityMatchTask.java +++ b/orca-clouddriver/src/main/java/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/WaitForCapacityMatchTask.java @@ -1,11 +1,11 @@ /* - * Copyright 2014 Netflix, Inc. + * Copyright 2024 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -27,6 +27,12 @@ @Component public class WaitForCapacityMatchTask extends AbstractInstancesCheckTask { + private final ServerGroupProperties serverGroupProperties; + + public WaitForCapacityMatchTask(ServerGroupProperties serverGroupProperties) { + this.serverGroupProperties = serverGroupProperties; + } + @Override protected Map> getServerGroups(StageExecution stage) { return (Map>) stage.getContext().get("deploy.server.groups"); @@ -88,27 +94,37 @@ protected boolean hasSucceeded( desired = capacity.getDesired(); } - Integer targetDesiredSize = - Optional.ofNullable((Number) context.get("targetDesiredSize")) - .map(Number::intValue) - .orElse(null); - - splainer.add( - String.format( - "checking if capacity matches (desired=%s, target=%s current=%s)", - desired, targetDesiredSize == null ? "none" : targetDesiredSize, instances.size())); - if (targetDesiredSize != null && targetDesiredSize != 0) { - // `targetDesiredSize` is derived from `targetHealthyDeployPercentage` and if present, - // then scaling has succeeded if the number of instances is greater than this value. - if (instances.size() < targetDesiredSize) { + if (serverGroupProperties.getResize().isMatchInstancesSize()) { + splainer.add( + String.format( + "checking if capacity matches (desired=%s, instances.size()=%s)", + desired, instances.size())); + if (desired == null || desired != instances.size()) { splainer.add( - "short-circuiting out of WaitForCapacityMatchTask because targetDesired and current capacity don't match"); + "short-circuiting out of WaitForCapacityMatchTask because expected and current capacity don't match"); return false; } - } else if (desired == null || desired != instances.size()) { + } else { + Integer targetDesiredSize = + Optional.ofNullable((Number) context.get("targetDesiredSize")) + .map(Number::intValue) + .orElse(null); + splainer.add( - "short-circuiting out of WaitForCapacityMatchTask because expected and current capacity don't match"); - return false; + String.format( + "checking if capacity matches (desired=%s, target=%s current=%s)", + desired, targetDesiredSize == null ? "none" : targetDesiredSize, instances.size())); + if (targetDesiredSize != null && targetDesiredSize != 0) { + // `targetDesiredSize` is derived from `targetHealthyDeployPercentage` and if present, + // then scaling has succeeded if the number of instances is greater than this value.
+ if (instances.size() < targetDesiredSize) { + splainer.add( + "short-circuiting out of WaitForCapacityMatchTask because targetDesired and current capacity don't match"); + return false; + } + } else if (desired == null || desired != instances.size()) { + splainer.add( + "short-circuiting out of WaitForCapacityMatchTask because expected and current capacity don't match"); + return false; + } } boolean disabled = Boolean.TRUE.equals(serverGroup.getDisabled()); diff --git a/orca-clouddriver/src/test/groovy/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/WaitForCapacityMatchTaskSpec.groovy b/orca-clouddriver/src/test/groovy/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/WaitForCapacityMatchTaskSpec.groovy index 3d1bfab4cd..3e9f1da86a 100644 --- a/orca-clouddriver/src/test/groovy/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/WaitForCapacityMatchTaskSpec.groovy +++ b/orca-clouddriver/src/test/groovy/com/netflix/spinnaker/orca/clouddriver/tasks/servergroup/WaitForCapacityMatchTaskSpec.groovy @@ -30,7 +30,7 @@ import spock.lang.Unroll class WaitForCapacityMatchTaskSpec extends Specification { CloudDriverService cloudDriverService = Mock() - @Subject WaitForCapacityMatchTask task = new WaitForCapacityMatchTask() { + @Subject WaitForCapacityMatchTask task = new WaitForCapacityMatchTask(new ServerGroupProperties()) { @Override void verifyServerGroupsExist(StageExecution stage) { // do nothing @@ -264,6 +264,60 @@ class WaitForCapacityMatchTaskSpec extends Specification { true || 4 | [min: 3, max: 10, desired: 4] | [min: "1", max: "50", desired: "5"] } + @Unroll + void 'should use number of instances when determining if scaling has succeeded even if targetHealthyDeployPercentage is defined'() { + def serverGroupProperties = new ServerGroupProperties() + def resize = new ServerGroupProperties.Resize() + resize.setMatchInstancesSize(true) + serverGroupProperties.setResize(resize) + WaitForCapacityMatchTask task = new WaitForCapacityMatchTask(serverGroupProperties) { + @Override + void verifyServerGroupsExist(StageExecution stage) { + // do nothing + } + } + when: + def context = [ + capacity: [ + min: configured.min, + max: configured.max, + desired: configured.desired + ], + targetHealthyDeployPercentage: targetHealthyDeployPercentage, + targetDesiredSize: targetHealthyDeployPercentage + ? 
Math.round(targetHealthyDeployPercentage * configured.desired / 100) : null + ] + + def serverGroup = ModelUtils.serverGroup([ + asg: [ + desiredCapacity: asg.desired + ], + capacity: [ + min: asg.min, + max: asg.max, + desired: asg.desired + ] + ]) + + def instances = [] + (1..healthy).each { + instances << ModelUtils.instance([health: [[state: 'Up']]]) + } + + then: + result == task.hasSucceeded( + new StageExecutionImpl(PipelineExecutionImpl.newPipeline("orca"), "", "", context), + serverGroup, instances, null + ) + + where: + result || healthy | asg | configured | targetHealthyDeployPercentage + false || 5 | [min: 10, max: 15, desired: 15] | [min: 10, max: 15, desired: 15] | 85 + false || 12 | [min: 10, max: 15, desired: 15] | [min: 10, max: 15, desired: 15] | 85 + false || 13 | [min: 10, max: 15, desired: 15] | [min: 10, max: 15, desired: 15] | 85 + true || 15 | [min: 10, max: 15, desired: 15] | [min: 10, max: 15, desired: 15] | 100 + } + private static Instance makeInstance(id, healthState = 'Up') { ModelUtils.instance([instanceId: id, health: [ [ state: healthState ] ]]) } diff --git a/orca-clouddriver/src/test/java/com/netflix/spinnaker/orca/clouddriver/config/CloudDriverConfigurationTest.java b/orca-clouddriver/src/test/java/com/netflix/spinnaker/orca/clouddriver/config/CloudDriverConfigurationTest.java index 6d22c323b3..cea63b2a69 100644 --- a/orca-clouddriver/src/test/java/com/netflix/spinnaker/orca/clouddriver/config/CloudDriverConfigurationTest.java +++ b/orca-clouddriver/src/test/java/com/netflix/spinnaker/orca/clouddriver/config/CloudDriverConfigurationTest.java @@ -78,7 +78,7 @@ public OkHttpClient.Builder get(@NotNull ServiceEndpoint service) { ObjectMapper objectMapper = new ObjectMapper(); RestAdapter.LogLevel logLevel = RestAdapter.LogLevel.FULL; - RequestInterceptor requestInterceptor = new SpinnakerRequestInterceptor(null); + RequestInterceptor requestInterceptor = new SpinnakerRequestInterceptor(true); this.clouddriverRetrofitBuilder = new CloudDriverConfiguration.ClouddriverRetrofitBuilder( diff --git a/orca-clouddriver/src/test/java/com/netflix/spinnaker/orca/clouddriver/tasks/monitoreddeploy/EvaluateDeploymentHealthTaskTest.java b/orca-clouddriver/src/test/java/com/netflix/spinnaker/orca/clouddriver/tasks/monitoreddeploy/EvaluateDeploymentHealthTaskTest.java index c8220e0390..1a31a4b4db 100644 --- a/orca-clouddriver/src/test/java/com/netflix/spinnaker/orca/clouddriver/tasks/monitoreddeploy/EvaluateDeploymentHealthTaskTest.java +++ b/orca-clouddriver/src/test/java/com/netflix/spinnaker/orca/clouddriver/tasks/monitoreddeploy/EvaluateDeploymentHealthTaskTest.java @@ -36,7 +36,6 @@ import com.netflix.spectator.api.NoopRegistry; import com.netflix.spinnaker.config.DeploymentMonitorDefinition; import com.netflix.spinnaker.kork.test.log.MemoryAppender; -import com.netflix.spinnaker.okhttp.OkHttpClientConfigurationProperties; import com.netflix.spinnaker.okhttp.SpinnakerRequestInterceptor; import com.netflix.spinnaker.orca.api.pipeline.TaskResult; import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus; @@ -119,7 +118,7 @@ void setup(TestInfo testInfo) { new DeploymentMonitorServiceProvider( okClient, retrofitLogLevel, - new SpinnakerRequestInterceptor(new OkHttpClientConfigurationProperties()), + new SpinnakerRequestInterceptor(true), deploymentMonitorDefinitions); evaluateDeploymentHealthTask = new EvaluateDeploymentHealthTask(deploymentMonitorServiceProvider, new NoopRegistry()); diff --git a/orca-core/orca-core.gradle b/orca-core/orca-core.gradle index 
e5bfcbd444..1608c5e2ac 100644 --- a/orca-core/orca-core.gradle +++ b/orca-core/orca-core.gradle @@ -51,7 +51,7 @@ dependencies { implementation("org.apache.httpcomponents:httpclient") implementation("javax.servlet:javax.servlet-api:4.0.1") implementation("javax.validation:validation-api") - implementation("com.jayway.jsonpath:json-path:2.2.0") + implementation("com.jayway.jsonpath:json-path") implementation("org.yaml:snakeyaml") implementation("org.codehaus.groovy:groovy") implementation("net.javacrumbs.shedlock:shedlock-spring:4.44.0") diff --git a/orca-core/src/main/java/com/netflix/spinnaker/orca/config/ExecutionConfigurationProperties.java b/orca-core/src/main/java/com/netflix/spinnaker/orca/config/ExecutionConfigurationProperties.java index 66de4b760d..72e4f9d0fa 100644 --- a/orca-core/src/main/java/com/netflix/spinnaker/orca/config/ExecutionConfigurationProperties.java +++ b/orca-core/src/main/java/com/netflix/spinnaker/orca/config/ExecutionConfigurationProperties.java @@ -42,6 +42,9 @@ public class ExecutionConfigurationProperties { */ private Set allowedOrchestrationExecutions = Set.of(); + /** flag to include/exclude the set of allowed accounts in an execution context */ + private boolean includeAllowedAccounts = true; + /** * helper method that returns an {@link Optional} object if the * orchestration execution provided as an input is in the allowed orchestration executions set. diff --git a/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/CompoundExecutionOperator.java b/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/CompoundExecutionOperator.java index dce150404d..2e0b45a543 100644 --- a/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/CompoundExecutionOperator.java +++ b/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/CompoundExecutionOperator.java @@ -141,15 +141,15 @@ public PipelineExecution restartStage( private PipelineExecution updatePreconditionStageExpression( Map restartDetails, PipelineExecution execution) { - List preconditionList = getPreconditionsFromStage(restartDetails); - if (preconditionList.isEmpty()) { + Map> preconditionMap = getPreconditionsFromStage(restartDetails); + if (preconditionMap.isEmpty()) { return execution; } for (StageExecution stage : execution.getStages()) { if (stage.getType() != null && stage.getType().equalsIgnoreCase("checkPreconditions")) { if (stage.getContext().get("preconditions") != null) { - stage.getContext().replace("preconditions", preconditionList); + stage.getContext().replace("preconditions", preconditionMap.get(stage.getRefId())); repository.storeStage(stage); log.info("Updated preconditions for CheckPreconditions stage"); } @@ -158,8 +158,8 @@ private PipelineExecution updatePreconditionStageExpression( return execution; } - private List getPreconditionsFromStage(Map restartDetails) { - List preconditionList = new ArrayList(); + private Map> getPreconditionsFromStage(Map restartDetails) { + Map> preconditionMap = new HashMap<>(); Map pipelineConfigMap = new HashMap(restartDetails); List keysToRetain = new ArrayList(); @@ -173,12 +173,13 @@ private List getPreconditionsFromStage(Map restartDetails) { List pipelineStageList = pipelineStageMap.get(keysToRetain.get(0)); for (Map stageMap : pipelineStageList) { if (stageMap.get("type").toString().equalsIgnoreCase("checkPreconditions")) { - preconditionList = (List) stageMap.get("preconditions"); + preconditionMap.put( + (String) stageMap.get("refId"), (List) stageMap.get("preconditions")); log.info("Retrieved preconditions for 
CheckPreconditions stage"); } } } - return preconditionList; + return preconditionMap; } private PipelineExecution doInternal( diff --git a/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/ExecutionLauncher.java b/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/ExecutionLauncher.java index 3d5019edc1..6255292179 100644 --- a/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/ExecutionLauncher.java +++ b/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/ExecutionLauncher.java @@ -230,6 +230,7 @@ private PipelineExecution parsePipeline(Map config) { config.get("source"), PipelineExecution.PipelineSource.class)) .withSpelEvaluator(getString(config, "spelEvaluator")) .withTemplateVariables(getMap(config, "templateVariables")) + .withIncludeAllowedAccounts(executionConfigurationProperties.isIncludeAllowedAccounts()) .build(); } @@ -269,8 +270,14 @@ private PipelineExecution parseOrchestration(Map config) { PipelineExecution.AuthenticationDetails auth = new PipelineExecution.AuthenticationDetails(); auth.setUser(getString(config, "user")); - orchestration.setAuthentication( - PipelineExecutionImpl.AuthenticationHelper.build().orElse(auth)); + if (executionConfigurationProperties.isIncludeAllowedAccounts()) { + orchestration.setAuthentication( + PipelineExecutionImpl.AuthenticationHelper.build().orElse(auth)); + } else { + orchestration.setAuthentication( + PipelineExecutionImpl.AuthenticationHelper.buildWithoutAccounts().orElse(auth)); + } + orchestration.setOrigin((String) config.getOrDefault("origin", "unknown")); orchestration.setStartTimeExpiry((Long) config.get("startTimeExpiry")); orchestration.setSpelEvaluator(getString(config, "spelEvaluator")); diff --git a/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/model/PipelineBuilder.java b/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/model/PipelineBuilder.java index 84235d378c..c3c56c2a73 100644 --- a/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/model/PipelineBuilder.java +++ b/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/model/PipelineBuilder.java @@ -26,10 +26,17 @@ @Deprecated public class PipelineBuilder { + private boolean includeAllowedAccounts; + public PipelineBuilder(String application) { pipeline = PipelineExecutionImpl.newPipeline(application); } + public PipelineBuilder withIncludeAllowedAccounts(boolean includeAllowedAccounts) { + this.includeAllowedAccounts = includeAllowedAccounts; + return this; + } + public PipelineBuilder withId(String id) { if (!Strings.isNullOrEmpty(id)) { pipeline.setId(id); @@ -84,22 +91,44 @@ public PipelineBuilder withStage(String type) { } public PipelineBuilder withStages(List> stages) { - if (stages != null) { - stages.forEach( - it -> { - String type = it.remove("type").toString(); - String name = it.containsKey("name") ? it.remove("name").toString() : null; - withStage(type, name != null ? name : type, it); - }); + if (stages == null) { + throw new IllegalArgumentException( + "null stages in pipeline '" + + pipeline.getName() + + "' in application '" + + pipeline.getApplication() + + "'"); } + stages.forEach( + it -> { + String name = it.containsKey("name") ? it.remove("name").toString() : null; + if (!it.containsKey("type")) { + throw new IllegalArgumentException( + "type missing from pipeline '" + + pipeline.getName() + + "' in application '" + + pipeline.getApplication() + + "', stage name: '" + + name + + "'"); + } + String type = it.remove("type").toString(); + withStage(type, name != null ? 
name : type, it); + }); return this; } public PipelineExecution build() { pipeline.setBuildTime(System.currentTimeMillis()); - pipeline.setAuthentication( - PipelineExecutionImpl.AuthenticationHelper.build() - .orElse(new PipelineExecution.AuthenticationDetails())); + if (this.includeAllowedAccounts) { + pipeline.setAuthentication( + PipelineExecutionImpl.AuthenticationHelper.build() + .orElse(new PipelineExecution.AuthenticationDetails())); + } else { + pipeline.setAuthentication( + PipelineExecutionImpl.AuthenticationHelper.buildWithoutAccounts() + .orElse(new PipelineExecution.AuthenticationDetails())); + } return pipeline; } diff --git a/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/model/PipelineExecutionImpl.java b/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/model/PipelineExecutionImpl.java index f03fc6e43c..34430668f3 100644 --- a/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/model/PipelineExecutionImpl.java +++ b/orca-core/src/main/java/com/netflix/spinnaker/orca/pipeline/model/PipelineExecutionImpl.java @@ -444,5 +444,13 @@ public static Optional build() { return Optional.empty(); } + + public static Optional buildWithoutAccounts() { + Optional spinnakerUserOptional = AuthenticatedRequest.getSpinnakerUser(); + if (spinnakerUserOptional.isPresent()) { + return Optional.of(new AuthenticationDetails(spinnakerUserOptional.orElse(null))); + } + return Optional.empty(); + } } } diff --git a/orca-core/src/test/groovy/com/netflix/spinnaker/orca/pipeline/model/PipelineExecutionImplSpec.groovy b/orca-core/src/test/groovy/com/netflix/spinnaker/orca/pipeline/model/PipelineExecutionImplSpec.groovy index c005bd2b19..9bda671abc 100644 --- a/orca-core/src/test/groovy/com/netflix/spinnaker/orca/pipeline/model/PipelineExecutionImplSpec.groovy +++ b/orca-core/src/test/groovy/com/netflix/spinnaker/orca/pipeline/model/PipelineExecutionImplSpec.groovy @@ -57,6 +57,19 @@ class PipelineExecutionImplSpec extends Specification { authenticationDetails.allowedAccounts == ["Account1", "Account2"] as Set } + def "should build AuthenticationDetails without accounts"() { + given: + MDC.put(Header.USER.header, "SpinnakerUser") + MDC.put(Header.ACCOUNTS.header, "Account1,Account2") + + when: + def authenticationDetails = PipelineExecutionImpl.AuthenticationHelper.buildWithoutAccounts().get() + + then: + authenticationDetails.user == "SpinnakerUser" + authenticationDetails.allowedAccounts == [] as Set + } + def "should calculate context from outputs of all stages"() { given: def pipeline = pipeline { diff --git a/orca-core/src/test/java/com/netflix/spinnaker/orca/pipeline/CompoundExecutionOperatorTest.java b/orca-core/src/test/java/com/netflix/spinnaker/orca/pipeline/CompoundExecutionOperatorTest.java index a3220b5053..a8864ae091 100644 --- a/orca-core/src/test/java/com/netflix/spinnaker/orca/pipeline/CompoundExecutionOperatorTest.java +++ b/orca-core/src/test/java/com/netflix/spinnaker/orca/pipeline/CompoundExecutionOperatorTest.java @@ -41,6 +41,9 @@ final class CompoundExecutionOperatorTest { private static final String PIPELINE = "mypipeline"; private static final String EXECUTION_ID = "EXECUTION_ID"; private static final String STAGE_ID = "STAGE_ID"; + private static final String UPSTREAM_STAGE = "UPSTREAM_STAGE"; + private static final String UPSTREAM_STAGE_ID = "UPSTREAM_STAGE_ID"; + private static final int PRECONDITION_STAGE_LIST_SIZE = 2; private final ExecutionRepository repository = mock(ExecutionRepository.class); private final ExecutionRunner runner = 
mock(ExecutionRunner.class); private final RetrySupport retrySupport = mock(RetrySupport.class); @@ -76,6 +79,46 @@ void restartStageWithValidExpression() { assertEquals(APPLICATION, updatedExecution.getApplication()); } + @Test + void restartUpstreamStageWithMultiplePreconditionStages() { + // ARRANGE + StageExecution upstreamStage = buildUpstreamStage(); + List preconditionStages = new ArrayList<>(PRECONDITION_STAGE_LIST_SIZE); + List executionStages = execution.getStages(); + executionStages.add(upstreamStage); + + for (int i = 0; i < PRECONDITION_STAGE_LIST_SIZE; i++) { + StageExecution preconditionStage = + buildPreconditionStage("expression_" + i, "precondition_" + i, "precondition_stage_" + i); + preconditionStages.add(preconditionStage); + executionStages.add(preconditionStage); + } + + Map restartDetails = + Map.of( + "application", APPLICATION, + "name", PIPELINE, + "executionId", EXECUTION_ID, + "stages", buildExpectedRestartStageList(preconditionStages, upstreamStage)); + + when(repository.retrieve(any(), anyString())).thenReturn(execution); + when(repository.handlesPartition(execution.getPartition())).thenReturn(true); + + // ACT + PipelineExecution updatedExecution = + executionOperator.restartStage(EXECUTION_ID, STAGE_ID, restartDetails); + + // ASSERT + assertEquals(APPLICATION, updatedExecution.getApplication()); + for (int i = 0, size = preconditionStages.size(); i < size; i++) { + StageExecution originalPreconditionStage = preconditionStages.get(i); + StageExecution updatedPreconditionStage = updatedExecution.getStages().get(i + 1); + assertEquals(originalPreconditionStage.getContext(), updatedPreconditionStage.getContext()); + assertEquals(originalPreconditionStage.getType(), updatedPreconditionStage.getType()); + assertEquals(originalPreconditionStage.getName(), updatedPreconditionStage.getName()); + } + } + @Test void restartStageWithNoPreconditions() { @@ -135,6 +178,55 @@ private PipelineExecution buildExpectedExecution( return execution; } + private List> buildExpectedRestartStageList( + List preconditionStages, StageExecution upstreamStage) { + List> restartStageList = new ArrayList<>(preconditionStages.size() + 1); + + Map upstreamStageMap = + Map.of( + "name", upstreamStage.getName(), + "type", upstreamStage.getType()); + restartStageList.add(upstreamStageMap); + + for (StageExecution stage : preconditionStages) { + Map stageMap = + Map.of( + "name", stage.getName(), + "type", stage.getType(), + "preconditions", stage.getContext().get("preconditions")); + restartStageList.add(stageMap); + } + return restartStageList; + } + + private StageExecution buildPreconditionStage( + String stageStatus, String stageId, String preConditionStageName) { + StageExecution preconditionStage = new StageExecutionImpl(); + preconditionStage.setId(stageId); + preconditionStage.setType("checkPreconditions"); + preconditionStage.setName(preConditionStageName); + Map contextMap = new HashMap<>(); + contextMap.put("stageName", UPSTREAM_STAGE); + contextMap.put("stageStatus", stageStatus); + List> preconditionList = new ArrayList<>(); + Map preconditionMap = new HashMap<>(); + preconditionMap.put("context", contextMap); + preconditionMap.put("failPipeline", true); + preconditionMap.put("type", "stageStatus"); + preconditionList.add(preconditionMap); + contextMap.put("preconditions", preconditionList); + preconditionStage.setContext(contextMap); + return preconditionStage; + } + + private StageExecution buildUpstreamStage() { + StageExecution upstreamStage = new StageExecutionImpl(); + 
upstreamStage.setId(UPSTREAM_STAGE_ID); + upstreamStage.setType("manualJudgment"); + upstreamStage.setName(UPSTREAM_STAGE); + return upstreamStage; + } + private Map buildExpectedRestartDetailsMap(String expression) { Map restartDetails = new HashMap<>(); List pipelineStageList = new ArrayList(); diff --git a/orca-core/src/test/java/com/netflix/spinnaker/orca/pipeline/ExecutionLauncherTest.java b/orca-core/src/test/java/com/netflix/spinnaker/orca/pipeline/ExecutionLauncherTest.java index f9400e1e1b..30c8437236 100644 --- a/orca-core/src/test/java/com/netflix/spinnaker/orca/pipeline/ExecutionLauncherTest.java +++ b/orca-core/src/test/java/com/netflix/spinnaker/orca/pipeline/ExecutionLauncherTest.java @@ -16,13 +16,14 @@ package com.netflix.spinnaker.orca.pipeline; +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.never; -import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.*; import com.fasterxml.jackson.databind.ObjectMapper; import com.netflix.spectator.api.Registry; +import com.netflix.spinnaker.kork.common.Header; import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionType; import com.netflix.spinnaker.orca.api.pipeline.models.PipelineExecution; import com.netflix.spinnaker.orca.config.ExecutionConfigurationProperties; @@ -31,12 +32,14 @@ import java.time.Clock; import java.util.Map; import java.util.Optional; +import java.util.Set; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.DisplayName; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; +import org.slf4j.MDC; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.boot.context.properties.EnableConfigurationProperties; import org.springframework.boot.test.context.SpringBootTest; @@ -73,6 +76,7 @@ public void setup() { clock = Clock.systemUTC(); pipelineValidator = Optional.empty(); registry = Optional.empty(); + MDC.clear(); executionLauncher = new ExecutionLauncher( @@ -199,6 +203,135 @@ public void testOrchestrationExecutionWhenUserIsNotInAllowList() throws Exceptio + " disabled for user: not-explicitly-permitted-user@abc.com"); } + @DisplayName( + "when includeAllowedAccounts: true, then the orchestration should contain Spinnaker accounts") + @Test + public void testIncludeSpinnakerAccountsInOrchestration() throws Exception { + // given + MDC.put(Header.USER.getHeader(), "SpinnakerUser"); + MDC.put(Header.ACCOUNTS.getHeader(), "Account1,Account2"); + + // override properties to allow orchestration executions + ExecutionConfigurationProperties executionConfigurationProperties = + new ExecutionConfigurationProperties(); + executionConfigurationProperties.setBlockOrchestrationExecutions(false); + executionLauncher = + new ExecutionLauncher( + objectMapper, + executionRepository, + executionRunner, + clock, + applicationEventPublisher, + pipelineValidator, + registry, + executionConfigurationProperties); + + // when + PipelineExecution pipelineExecution = + executionLauncher.start( + ExecutionType.ORCHESTRATION, getConfigJson("ad-hoc/deploy-manifest.json")); + + // then + // verify that the execution runner attempted to start the execution as expected + verify(executionRunner).start(pipelineExecution); + // verify that accounts are set in the pipeline execution + 
assertThat(pipelineExecution.getAuthentication().getAllowedAccounts()) + .isEqualTo(Set.of("Account1", "Account2")); + } + + @DisplayName( + "when includeAllowedAccounts: false, then the orchestration should not contain Spinnaker accounts") + @Test + public void testExcludeSpinnakerAccountsFromOrchestration() throws Exception { + // given + MDC.put(Header.USER.getHeader(), "SpinnakerUser"); + MDC.put(Header.ACCOUNTS.getHeader(), "Account1,Account2"); + + // override properties to 1. allow orchestration executions and 2. set includeAllowedAccounts to + // false + ExecutionConfigurationProperties executionConfigurationProperties = + new ExecutionConfigurationProperties(); + executionConfigurationProperties.setBlockOrchestrationExecutions(false); + executionConfigurationProperties.setIncludeAllowedAccounts(false); + executionLauncher = + new ExecutionLauncher( + objectMapper, + executionRepository, + executionRunner, + clock, + applicationEventPublisher, + pipelineValidator, + registry, + executionConfigurationProperties); + + // when + PipelineExecution pipelineExecution = + executionLauncher.start( + ExecutionType.ORCHESTRATION, getConfigJson("ad-hoc/deploy-manifest.json")); + + // then + // verify that the execution runner attempted to start the execution as expected + verify(executionRunner).start(pipelineExecution); + // verify that accounts are not set in the pipeline execution + assertThat(pipelineExecution.getAuthentication().getAllowedAccounts()).isEqualTo(Set.of()); + } + + @DisplayName( + "when includeAllowedAccounts: true, then the pipeline should contain Spinnaker accounts") + @Test + public void testIncludeSpinnakerAccountsInPipeline() throws Exception { + // given + MDC.put(Header.USER.getHeader(), "SpinnakerUser"); + MDC.put(Header.ACCOUNTS.getHeader(), "Account1,Account2"); + + // when + PipelineExecution pipelineExecution = + executionLauncher.start( + ExecutionType.PIPELINE, getConfigJson("ad-hoc/deploy-manifest.json")); + + // then + // verify that the execution runner attempted to start the execution as expected + verify(executionRunner).start(pipelineExecution); + // verify that accounts are set in the pipeline execution + assertThat(pipelineExecution.getAuthentication().getAllowedAccounts()) + .isEqualTo(Set.of("Account1", "Account2")); + } + + @DisplayName( + "when includeAllowedAccounts: false, then the pipeline should not contain Spinnaker accounts") + @Test + public void testExcludeSpinnakerAccountsFromPipeline() throws Exception { + // given + MDC.put(Header.USER.getHeader(), "SpinnakerUser"); + MDC.put(Header.ACCOUNTS.getHeader(), "Account1,Account2"); + + // override properties to set includeAllowedAccounts to false + ExecutionConfigurationProperties executionConfigurationProperties = + new ExecutionConfigurationProperties(); + executionConfigurationProperties.setIncludeAllowedAccounts(false); + executionLauncher = + new ExecutionLauncher( + objectMapper, + executionRepository, + executionRunner, + clock, + applicationEventPublisher, + pipelineValidator, + registry, + executionConfigurationProperties); + + // when + PipelineExecution pipelineExecution = + executionLauncher.start( + ExecutionType.PIPELINE, getConfigJson("ad-hoc/deploy-manifest.json")); + + // then + // verify that the execution runner attempted to start the execution as expected + verify(executionRunner).start(pipelineExecution); + // verify that accounts are not set in the pipeline execution + assertThat(pipelineExecution.getAuthentication().getAllowedAccounts()).isEqualTo(Set.of()); + } + 
private Map getConfigJson(String resource) throws Exception { return objectMapper.readValue( ExecutionLauncherTest.class.getResourceAsStream(resource), Map.class); diff --git a/orca-core/src/test/java/com/netflix/spinnaker/orca/pipeline/model/PipelineBuilderTest.java b/orca-core/src/test/java/com/netflix/spinnaker/orca/pipeline/model/PipelineBuilderTest.java new file mode 100644 index 0000000000..c2028aad33 --- /dev/null +++ b/orca-core/src/test/java/com/netflix/spinnaker/orca/pipeline/model/PipelineBuilderTest.java @@ -0,0 +1,84 @@ +/* + * Copyright 2023 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.orca.pipeline.model; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; + +import com.netflix.spinnaker.kork.common.Header; +import com.netflix.spinnaker.orca.api.pipeline.models.PipelineExecution; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.slf4j.MDC; + +class PipelineBuilderTest { + @BeforeEach + public void setup() { + MDC.clear(); + } + + @Test + void withStagesChecksForNull() { + PipelineBuilder pipelineBuilder = new PipelineBuilder("my-application"); + assertThrows(IllegalArgumentException.class, () -> pipelineBuilder.withStages(null)); + } + + @Test + void withStagesChecksForType() { + PipelineBuilder pipelineBuilder = new PipelineBuilder("my-application"); + Map stageWithoutType = new HashMap<>(); + stageWithoutType.put("name", "my-pipeline-stage"); + assertThrows( + IllegalArgumentException.class, + () -> pipelineBuilder.withStages(List.of(stageWithoutType))); + } + + @Test + void buildIncludesAllowedAccountsWhenTrue() { + // given + MDC.put(Header.USER.getHeader(), "SpinnakerUser"); + MDC.put(Header.ACCOUNTS.getHeader(), "Account1,Account2"); + + // when + PipelineBuilder pipelineBuilder = + new PipelineBuilder("my-application").withIncludeAllowedAccounts(true); + PipelineExecution execution = pipelineBuilder.build(); + + // then + assertThat(execution.getAuthentication().getAllowedAccounts()) + .isEqualTo(Set.of("Account1", "Account2")); + } + + @Test + void buildExcludesAllowedAccountsWhenFalse() { + // given + MDC.put(Header.USER.getHeader(), "SpinnakerUser"); + MDC.put(Header.ACCOUNTS.getHeader(), "Account1,Account2"); + + // when + PipelineBuilder pipelineBuilder = + new PipelineBuilder("my-application").withIncludeAllowedAccounts(false); + PipelineExecution execution = pipelineBuilder.build(); + + // then + assertThat(execution.getAuthentication().getAllowedAccounts()).isEqualTo(Set.of()); + } +} diff --git a/orca-core/src/test/resources/execution-launcher-properties.yml b/orca-core/src/test/resources/execution-launcher-properties.yml index b82c89a763..3fb639d87b 100644 --- a/orca-core/src/test/resources/execution-launcher-properties.yml +++ 
b/orca-core/src/test/resources/execution-launcher-properties.yml @@ -7,3 +7,4 @@ executions: - name: updateApplication # eg. config to show how to set the name and allowAllUsers properties for an execution allowAllUsers: true - name: saveApplication # eg. config to show how to set just the name of an execution. It defaults to allowAllUsers: true + includeAllowedAccounts: true diff --git a/orca-echo/orca-echo.gradle b/orca-echo/orca-echo.gradle index 35d4e2e8e7..c6ab3976e1 100644 --- a/orca-echo/orca-echo.gradle +++ b/orca-echo/orca-echo.gradle @@ -28,6 +28,7 @@ dependencies { implementation("javax.validation:validation-api") implementation("io.spinnaker.fiat:fiat-core:$fiatVersion") implementation("io.spinnaker.fiat:fiat-api:$fiatVersion") + implementation("io.spinnaker.kork:kork-retrofit") testImplementation("com.squareup.retrofit:retrofit-mock") } diff --git a/orca-echo/src/main/groovy/com/netflix/spinnaker/orca/echo/config/EchoConfiguration.groovy b/orca-echo/src/main/groovy/com/netflix/spinnaker/orca/echo/config/EchoConfiguration.groovy index c0d56653f6..5ad4573e9f 100644 --- a/orca-echo/src/main/groovy/com/netflix/spinnaker/orca/echo/config/EchoConfiguration.groovy +++ b/orca-echo/src/main/groovy/com/netflix/spinnaker/orca/echo/config/EchoConfiguration.groovy @@ -21,6 +21,7 @@ import com.jakewharton.retrofit.Ok3Client import com.netflix.spinnaker.config.DefaultServiceEndpoint import com.netflix.spinnaker.config.okhttp3.OkHttpClientProvider import com.netflix.spinnaker.kork.dynamicconfig.DynamicConfigService +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler import com.netflix.spinnaker.orca.echo.EchoService import com.netflix.spinnaker.orca.echo.spring.EchoNotifyingExecutionListener import com.netflix.spinnaker.orca.echo.spring.EchoNotifyingStageListener @@ -69,6 +70,7 @@ class EchoConfiguration { .setEndpoint(echoEndpoint) .setClient(new Ok3Client(clientProvider.getClient(new DefaultServiceEndpoint("echo", echoEndpoint.url)))) .setLogLevel(retrofitLogLevel) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) .setLog(new RetrofitSlf4jLog(EchoService)) .setConverter(new JacksonConverter()) .build() diff --git a/orca-flex/orca-flex.gradle b/orca-flex/orca-flex.gradle index 1fa5592e3b..a7343f829b 100644 --- a/orca-flex/orca-flex.gradle +++ b/orca-flex/orca-flex.gradle @@ -24,4 +24,5 @@ dependencies { implementation("com.netflix.frigga:frigga") implementation("io.spinnaker.kork:kork-core") implementation("io.spinnaker.kork:kork-moniker") + implementation("io.spinnaker.kork:kork-retrofit") } diff --git a/orca-flex/src/main/groovy/com/netflix/spinnaker/orca/flex/config/FlexConfiguration.groovy b/orca-flex/src/main/groovy/com/netflix/spinnaker/orca/flex/config/FlexConfiguration.groovy index 4a58530ad0..b0131e6998 100644 --- a/orca-flex/src/main/groovy/com/netflix/spinnaker/orca/flex/config/FlexConfiguration.groovy +++ b/orca-flex/src/main/groovy/com/netflix/spinnaker/orca/flex/config/FlexConfiguration.groovy @@ -17,6 +17,7 @@ package com.netflix.spinnaker.orca.flex.config import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler import com.netflix.spinnaker.orca.flex.FlexService import com.netflix.spinnaker.orca.retrofit.RetrofitConfiguration import com.netflix.spinnaker.orca.retrofit.logging.RetrofitSlf4jLog @@ -59,6 +60,7 @@ class FlexConfiguration { .setEndpoint(flexEndpoint) .setClient(retrofitClient) .setLogLevel(retrofitLogLevel) + 
.setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) .setLog(new RetrofitSlf4jLog(FlexService)) .setConverter(new JacksonConverter(mapper)) .build() diff --git a/orca-front50/src/main/groovy/com/netflix/spinnaker/orca/front50/DependentPipelineStarter.groovy b/orca-front50/src/main/groovy/com/netflix/spinnaker/orca/front50/DependentPipelineStarter.groovy index e68ae60efc..9e711344fc 100644 --- a/orca-front50/src/main/groovy/com/netflix/spinnaker/orca/front50/DependentPipelineStarter.groovy +++ b/orca-front50/src/main/groovy/com/netflix/spinnaker/orca/front50/DependentPipelineStarter.groovy @@ -76,8 +76,9 @@ class DependentPipelineStarter implements ApplicationContextAware { throw new ConfigurationException("Pipeline '${pipelineConfig.name}' is disabled and cannot be triggered") } - if (log.isInfoEnabled()) { - log.info('triggering dependent pipeline {}:{}', pipelineConfig.id, + log.info("triggering dependent pipeline ${pipelineConfig.id}") + if (log.isDebugEnabled()) { + log.debug('triggering dependent pipeline {}:{}', pipelineConfig.id, objectMapper.writeValueAsString(pipelineConfig)) } @@ -180,8 +181,9 @@ class DependentPipelineStarter implements ApplicationContextAware { def augmentedContext = [trigger: pipelineConfig.trigger] def processedPipeline = contextParameterProcessor.processPipeline(pipelineConfig, augmentedContext, false) - if (log.isInfoEnabled()) { - log.info('running pipeline {}:{}', pipelineConfig.id, objectMapper.writeValueAsString(processedPipeline)) + log.info("running pipeline ${pipelineConfig.id}") + if (log.isDebugEnabled()) { + log.debug('running pipeline {}:{}', pipelineConfig.id, objectMapper.writeValueAsString(processedPipeline)) } Callable callable diff --git a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/config/IgorConfiguration.groovy b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/config/IgorConfiguration.groovy index 8176a2c0e5..2fc638d15a 100644 --- a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/config/IgorConfiguration.groovy +++ b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/config/IgorConfiguration.groovy @@ -20,6 +20,7 @@ import com.fasterxml.jackson.databind.ObjectMapper import com.jakewharton.retrofit.Ok3Client import com.netflix.spinnaker.config.DefaultServiceEndpoint import com.netflix.spinnaker.config.okhttp3.OkHttpClientProvider +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler import com.netflix.spinnaker.orca.igor.IgorService import com.netflix.spinnaker.orca.retrofit.RetrofitConfiguration import com.netflix.spinnaker.orca.retrofit.logging.RetrofitSlf4jLog @@ -34,7 +35,6 @@ import org.springframework.context.annotation.Import import retrofit.Endpoint import retrofit.RequestInterceptor import retrofit.RestAdapter -import retrofit.client.Client import retrofit.converter.JacksonConverter import static retrofit.Endpoints.newFixedEndpoint @@ -61,6 +61,7 @@ class IgorConfiguration { .setEndpoint(igorEndpoint) .setClient(new Ok3Client(clientProvider.getClient(new DefaultServiceEndpoint("igor", igorEndpoint.url)))) .setLogLevel(retrofitLogLevel) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) .setRequestInterceptor(spinnakerRequestInterceptor) .setLog(new RetrofitSlf4jLog(IgorService)) .setConverter(new JacksonConverter(mapper)) diff --git a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorJenkinsJobTask.groovy b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorJenkinsJobTask.groovy index 
6aecaaecfa..10b5225c2f 100644 --- a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorJenkinsJobTask.groovy +++ b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorJenkinsJobTask.groovy @@ -18,6 +18,7 @@ package com.netflix.spinnaker.orca.igor.tasks import com.google.common.base.Enums import com.netflix.spinnaker.kork.core.RetrySupport +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.api.pipeline.OverridableTimeoutRetryableTask import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution @@ -26,7 +27,6 @@ import com.netflix.spinnaker.orca.igor.BuildService import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import retrofit.RetrofitError import java.util.concurrent.TimeUnit @@ -81,9 +81,9 @@ class MonitorJenkinsJobTask implements OverridableTimeoutRetryableTask { } return TaskResult.builder(taskStatus).context(outputs).outputs(outputs).build() - } catch (RetrofitError e) { - if ([503, 500, 404].contains(e.response?.status)) { - log.warn("Http ${e.response.status} received from `igor`, retrying...") + } catch (SpinnakerHttpException e) { + if ([503, 500, 404].contains(e.responseCode)) { + log.warn("Http ${e.responseCode} received from `igor`, retrying...") return TaskResult.ofStatus(ExecutionStatus.RUNNING) } diff --git a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorQueuedJenkinsJobTask.groovy b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorQueuedJenkinsJobTask.groovy index 3a479d8f5b..7db9cd9c58 100644 --- a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorQueuedJenkinsJobTask.groovy +++ b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorQueuedJenkinsJobTask.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.orca.igor.tasks +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution import com.netflix.spinnaker.orca.igor.IgorService import org.springframework.beans.factory.annotation.Value @@ -28,7 +29,6 @@ import com.netflix.spinnaker.orca.igor.BuildService import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import retrofit.RetrofitError @Slf4j @Component @@ -56,9 +56,9 @@ class MonitorQueuedJenkinsJobTask implements OverridableTimeoutRetryableTask { createBacklink(stage, jenkinsController, jobName, build.number as Integer) return TaskResult.builder(ExecutionStatus.SUCCEEDED).context([buildNumber: build.number]).build() } - } catch (RetrofitError e) { - if ([503, 500, 404].contains(e.response?.status)) { - log.warn("Http ${e.response.status} received from `igor`, retrying...") + } catch (SpinnakerHttpException e) { + if ([503, 500, 404].contains(e.responseCode)) { + log.warn("Http ${e.responseCode} received from `igor`, retrying...") return TaskResult.ofStatus(ExecutionStatus.RUNNING) } throw e diff --git a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorWerckerJobStartedTask.groovy b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorWerckerJobStartedTask.groovy index 9fe96b056c..5fac717cf1 100644 --- a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorWerckerJobStartedTask.groovy 
+++ b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorWerckerJobStartedTask.groovy @@ -8,6 +8,7 @@ */ package com.netflix.spinnaker.orca.igor.tasks +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.api.pipeline.OverridableTimeoutRetryableTask import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution @@ -16,7 +17,6 @@ import com.netflix.spinnaker.orca.igor.BuildService import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import retrofit.RetrofitError import javax.annotation.Nonnull import java.util.concurrent.TimeUnit @@ -38,7 +38,6 @@ class MonitorWerckerJobStartedTask implements OverridableTimeoutRetryableTask { try { Map build = buildService.getBuild(buildNumber, master, job) - Map outputs = [:] if ("not_built".equals(build?.result) || build?.number == null) { //The build has not yet started, so the job started monitoring task needs to be re-run return TaskResult.ofStatus(ExecutionStatus.RUNNING) @@ -47,9 +46,9 @@ class MonitorWerckerJobStartedTask implements OverridableTimeoutRetryableTask { return TaskResult.builder(ExecutionStatus.SUCCEEDED).context([buildNumber: build.number]).build() } - } catch (RetrofitError e) { - if ([503, 500, 404].contains(e.response?.status)) { - log.warn("Http ${e.response.status} received from `igor`, retrying...") + } catch (SpinnakerHttpException e) { + if ([503, 500, 404].contains(e.responseCode)) { + log.warn("Http ${e.responseCode} received from `igor`, retrying...") return TaskResult.ofStatus(ExecutionStatus.RUNNING) } diff --git a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/StartJenkinsJobTask.groovy b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/StartJenkinsJobTask.groovy index 5138cfea44..24b3958413 100644 --- a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/StartJenkinsJobTask.groovy +++ b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/StartJenkinsJobTask.groovy @@ -18,20 +18,19 @@ package com.netflix.spinnaker.orca.igor.tasks import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.kork.exceptions.SystemException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import com.netflix.spinnaker.orca.api.pipeline.RetryableTask import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus -import com.netflix.spinnaker.orca.api.pipeline.Task import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution import com.netflix.spinnaker.orca.api.pipeline.TaskResult import com.netflix.spinnaker.orca.exceptions.ExceptionHandler import com.netflix.spinnaker.orca.igor.BuildService -import com.netflix.spinnaker.orca.retrofit.exceptions.RetrofitExceptionHandler +import com.netflix.spinnaker.orca.retrofit.exceptions.SpinnakerServerExceptionHandler import org.slf4j.Logger import org.slf4j.LoggerFactory import org.springframework.beans.factory.annotation.Autowired import org.springframework.http.HttpStatus import org.springframework.stereotype.Component -import retrofit.RetrofitError import retrofit.client.Response import javax.annotation.Nonnull @@ -48,7 +47,7 @@ class StartJenkinsJobTask implements RetryableTask { ObjectMapper objectMapper @Autowired - RetrofitExceptionHandler retrofitExceptionHandler + SpinnakerServerExceptionHandler spinnakerServerExceptionHandler @Nonnull 
@Override @@ -72,9 +71,9 @@ class StartJenkinsJobTask implements RetryableTask { .build() } } - catch (RetrofitError e) { + catch (SpinnakerServerException e){ // This igor call is idempotent so we can retry despite it being PUT/POST - ExceptionHandler.Response exceptionResponse = retrofitExceptionHandler.handle("StartJenkinsJob", e) + ExceptionHandler.Response exceptionResponse = spinnakerServerExceptionHandler.handle("StartJenkinsJob", e) if (exceptionResponse.shouldRetry) { log.warn("Failure communicating with igor to start a jenkins job, will retry", e) diff --git a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/StartScriptTask.groovy b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/StartScriptTask.groovy index c41bf144f7..5811b072d2 100644 --- a/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/StartScriptTask.groovy +++ b/orca-igor/src/main/groovy/com/netflix/spinnaker/orca/igor/tasks/StartScriptTask.groovy @@ -18,13 +18,14 @@ package com.netflix.spinnaker.orca.igor.tasks import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.kork.exceptions.SystemException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import com.netflix.spinnaker.orca.api.pipeline.RetryableTask import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution import com.netflix.spinnaker.orca.api.pipeline.TaskResult import com.netflix.spinnaker.orca.exceptions.ExceptionHandler import com.netflix.spinnaker.orca.igor.BuildService -import com.netflix.spinnaker.orca.retrofit.exceptions.RetrofitExceptionHandler +import com.netflix.spinnaker.orca.retrofit.exceptions.SpinnakerServerExceptionHandler import org.slf4j.Logger import org.slf4j.LoggerFactory import org.springframework.beans.factory.annotation.Autowired @@ -32,7 +33,6 @@ import org.springframework.beans.factory.annotation.Value import org.springframework.core.env.Environment import org.springframework.http.HttpStatus import org.springframework.stereotype.Component -import retrofit.RetrofitError import retrofit.client.Response import javax.annotation.Nonnull @@ -49,7 +49,7 @@ class StartScriptTask implements RetryableTask { ObjectMapper objectMapper @Autowired - RetrofitExceptionHandler retrofitExceptionHandler + SpinnakerServerExceptionHandler spinnakerServerExceptionHandler @Autowired Environment environment @@ -113,9 +113,9 @@ class StartScriptTask implements RetryableTask { .build() } } - catch (RetrofitError e) { + catch (SpinnakerServerException e) { // This igor call is idempotent so we can retry despite it being PUT/POST - ExceptionHandler.Response exceptionResponse = retrofitExceptionHandler.handle("StartJenkinsJob", e) + ExceptionHandler.Response exceptionResponse = spinnakerServerExceptionHandler.handle("StartJenkinsJob", e) if (exceptionResponse.shouldRetry) { log.warn("Failure communicating with igor to start a jenkins job, will retry", e) diff --git a/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/BuildService.java b/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/BuildService.java index 4afa1ffc34..f1cc610dbc 100644 --- a/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/BuildService.java +++ b/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/BuildService.java @@ -60,12 +60,16 @@ public Map getBuild(Integer buildNumber, String master, String j public Map getPropertyFile( Integer buildNumber, String fileName, String master, String job) { - 
return igorService.getPropertyFile(buildNumber, fileName, master, encode(job)); + return this.igorFeatureFlagProperties.isJobNameAsQueryParameter() + ? igorService.getPropertyFileWithJobAsQueryParam(buildNumber, fileName, master, encode(job)) + : igorService.getPropertyFile(buildNumber, fileName, master, encode(job)); } public List getArtifacts( Integer buildNumber, String fileName, String master, String job) { - return igorService.getArtifacts(buildNumber, fileName, master, encode(job)); + return this.igorFeatureFlagProperties.isJobNameAsQueryParameter() + ? igorService.getArtifactsWithJobAsQueryParam(buildNumber, fileName, master, encode(job)) + : igorService.getArtifacts(buildNumber, fileName, master, encode(job)); } public Response updateBuild( diff --git a/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/IgorService.java b/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/IgorService.java index e2ec1a5c68..d1d37512e5 100644 --- a/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/IgorService.java +++ b/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/IgorService.java @@ -79,6 +79,13 @@ Map getPropertyFile( @Path("master") String master, @Path(encode = false, value = "job") String job); + @GET("/builds/properties/{buildNumber}/{fileName}/{master}") + Map getPropertyFileWithJobAsQueryParam( + @Path("buildNumber") Integer buildNumber, + @Path("fileName") String fileName, + @Path("master") String master, + @Query(value = "job") String job); + @GET("/{repoType}/{projectKey}/{repositorySlug}/compareCommits") List compareCommits( @Path("repoType") String repoType, @@ -93,6 +100,13 @@ List getArtifacts( @Path("master") String master, @Path(value = "job", encode = false) String job); + @GET("/builds/artifacts/{buildNumber}/{master}") + List getArtifactsWithJobAsQueryParam( + @Path("buildNumber") Integer buildNumber, + @Query("propertyFile") String propertyFile, + @Path("master") String master, + @Query(value = "job") String job); + @POST("/gcb/builds/create/{account}") GoogleCloudBuild createGoogleCloudBuild( @Path("account") String account, @Body Map job); diff --git a/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/tasks/RetryableIgorTask.java b/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/tasks/RetryableIgorTask.java index 97e004b198..c7abee72ec 100644 --- a/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/tasks/RetryableIgorTask.java +++ b/orca-igor/src/main/java/com/netflix/spinnaker/orca/igor/tasks/RetryableIgorTask.java @@ -17,6 +17,9 @@ package com.netflix.spinnaker.orca.igor.tasks; import com.google.common.collect.ImmutableMap; +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException; +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException; +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException; import com.netflix.spinnaker.orca.api.pipeline.RetryableTask; import com.netflix.spinnaker.orca.api.pipeline.TaskResult; import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus; @@ -28,7 +31,6 @@ import javax.annotation.Nonnull; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; -import retrofit.RetrofitError; @RequiredArgsConstructor @Slf4j @@ -53,7 +55,7 @@ protected int getMaxConsecutiveErrors() { try { TaskResult result = tryExecute(stageDefinition); return resetErrorCount(result); - } catch (RetrofitError e) { + } catch (SpinnakerServerException e) { if (stageDefinition.getConsecutiveErrors() < getMaxConsecutiveErrors() && 
isRetryable(e)) { return TaskResult.builder(ExecutionStatus.RUNNING) .context(errorContext(errors + 1)) @@ -83,14 +85,13 @@ private Map errorContext(int errors) { return Collections.singletonMap("consecutiveErrors", errors); } - private boolean isRetryable(RetrofitError retrofitError) { - if (retrofitError.getKind() == RetrofitError.Kind.NETWORK) { + private boolean isRetryable(SpinnakerServerException exception) { + if (exception instanceof SpinnakerNetworkException) { log.warn("Failed to communicate with igor, retrying..."); return true; - } - - if (retrofitError.getResponse() != null) { - int status = retrofitError.getResponse().getStatus(); + } else if (exception instanceof SpinnakerHttpException) { + var httpException = (SpinnakerHttpException) exception; + int status = httpException.getResponseCode(); if (status == 500 || status == 503) { log.warn(String.format("Received HTTP %s response from igor, retrying...", status)); return true; diff --git a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/BuildServiceSpec.groovy b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/BuildServiceSpec.groovy index f7f6e3785c..e11731010c 100644 --- a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/BuildServiceSpec.groovy +++ b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/BuildServiceSpec.groovy @@ -72,6 +72,18 @@ class BuildServiceSpec extends Specification { 1 * igorService.getPropertyFile(BUILD_NUMBER, FILENAME, MASTER, JOB_NAME_ENCODED) } + void 'getPropertyFile method with job in query when flag is true'() { + IgorFeatureFlagProperties igorFeatureFlagProperties = new IgorFeatureFlagProperties() + igorFeatureFlagProperties.setJobNameAsQueryParameter(true) + buildService = new BuildService(igorService, igorFeatureFlagProperties) + + when: + buildService.getPropertyFile(BUILD_NUMBER, FILENAME, MASTER, JOB_NAME) + + then: + 1 * igorService.getPropertyFileWithJobAsQueryParam(BUILD_NUMBER, FILENAME, MASTER, JOB_NAME_ENCODED) + } + void 'stop method sends job name in path when flag is false'() { IgorFeatureFlagProperties igorFeatureFlagProperties = new IgorFeatureFlagProperties() igorFeatureFlagProperties.setJobNameAsQueryParameter(false) diff --git a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetAwsCodeBuildArtifactsTaskSpec.groovy b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetAwsCodeBuildArtifactsTaskSpec.groovy index fe160d668f..6166474ebb 100644 --- a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetAwsCodeBuildArtifactsTaskSpec.groovy +++ b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetAwsCodeBuildArtifactsTaskSpec.groovy @@ -17,6 +17,7 @@ package com.netflix.spinnaker.orca.igor.tasks import com.netflix.spinnaker.kork.artifacts.model.Artifact +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.api.pipeline.TaskResult import com.netflix.spinnaker.orca.igor.IgorService @@ -73,14 +74,14 @@ class GetAwsCodeBuildArtifactsTaskSpec extends Specification { TaskResult result = task.execute(stage) then: - 1 * igorService.getAwsCodeBuildArtifacts(ACCOUNT, BUILD_ID) >> { throw stubRetrofitError() } + 1 * igorService.getAwsCodeBuildArtifacts(ACCOUNT, BUILD_ID) >> { throw stubNetworkError() } 0 * igorService._ result.getStatus() == ExecutionStatus.RUNNING } - def stubRetrofitError() { - return Stub(RetrofitError) { + def stubNetworkError() { + return new 
SpinnakerNetworkException(Stub(RetrofitError) { getKind() >> RetrofitError.Kind.NETWORK - } + }) } } diff --git a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetBuildPropertiesTaskSpec.groovy b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetBuildPropertiesTaskSpec.groovy index 43a57d7eaa..c5ca50c3e2 100644 --- a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetBuildPropertiesTaskSpec.groovy +++ b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetBuildPropertiesTaskSpec.groovy @@ -19,6 +19,7 @@ package com.netflix.spinnaker.orca.igor.tasks import com.fasterxml.jackson.databind.ObjectMapper import com.netflix.spinnaker.kork.artifacts.model.Artifact import com.netflix.spinnaker.kork.exceptions.ConfigurationException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.api.pipeline.TaskResult import com.netflix.spinnaker.orca.igor.BuildService @@ -120,9 +121,11 @@ class GetBuildPropertiesTaskSpec extends Specification { getResponse() >> new Response("", 500, "", Collections.emptyList(), null) } + def httpException = new SpinnakerHttpException(igorError) + and: 1 * buildService.getPropertyFile(BUILD_NUMBER, PROPERTY_FILE, MASTER, JOB) >> - { throw igorError } + { throw httpException } when: TaskResult result = task.execute(stage) diff --git a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetCommitsTaskSpec.groovy b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetCommitsTaskSpec.groovy index ff13049e64..91256a84d4 100644 --- a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetCommitsTaskSpec.groovy +++ b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetCommitsTaskSpec.groovy @@ -260,8 +260,7 @@ class GetCommitsTaskSpec extends Specification { then: 1 * scmService.compareCommits("stash", "projectKey", "repositorySlug", ['to': '186605b', 'from': 'a86305d', 'limit': 100]) >> { - throw new RetrofitError(null, null, - new Response("http://stash.com", 500, "test reason", [], null), null, null, null, null) + throw new SpinnakerHttpException(RetrofitError.httpError("http://stash.com", new Response("http://stash.com", 500, "test reason", [], null), null, null)) } result.status == taskStatus @@ -450,7 +449,7 @@ class GetCommitsTaskSpec extends Specification { and: task.scmService = Stub(ScmService) { compareCommits("stash", "projectKey", "repositorySlug", ['to': '186605b', 'from': 'a86305d', 'limit': 100]) >> { - throw new RetrofitError(null, null, new Response("http://stash.com", 404, "test reason", [], null), null, null, null, null) + throw new SpinnakerHttpException(RetrofitError.httpError("http://stash.com", new Response("http://stash.com", 404, "test reason", [], null), null, null)) } } diff --git a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetGoogleCloudBuildArtifactsTaskSpec.groovy b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetGoogleCloudBuildArtifactsTaskSpec.groovy index a72aca200d..abc8e6fb7b 100644 --- a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetGoogleCloudBuildArtifactsTaskSpec.groovy +++ b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/GetGoogleCloudBuildArtifactsTaskSpec.groovy @@ -17,6 +17,7 @@ package com.netflix.spinnaker.orca.igor.tasks import com.netflix.spinnaker.kork.artifacts.model.Artifact +import 
com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.api.pipeline.TaskResult import com.netflix.spinnaker.orca.igor.IgorService @@ -72,14 +73,14 @@ class GetGoogleCloudBuildArtifactsTaskSpec extends Specification { TaskResult result = task.execute(stage) then: - 1 * igorService.getGoogleCloudBuildArtifacts(ACCOUNT, BUILD_ID) >> { throw stubRetrofitError() } + 1 * igorService.getGoogleCloudBuildArtifacts(ACCOUNT, BUILD_ID) >> { throw stubNetworkError() } 0 * igorService._ result.getStatus() == ExecutionStatus.RUNNING } - def stubRetrofitError() { - return Stub(RetrofitError) { + def stubNetworkError() { + return new SpinnakerNetworkException(Stub(RetrofitError) { getKind() >> RetrofitError.Kind.NETWORK - } + }) } } diff --git a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorAwsCodeBuildTaskSpec.groovy b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorAwsCodeBuildTaskSpec.groovy index f9fcf3191a..fdd7e36efd 100644 --- a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorAwsCodeBuildTaskSpec.groovy +++ b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorAwsCodeBuildTaskSpec.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.orca.igor.tasks +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.api.pipeline.TaskResult import com.netflix.spinnaker.orca.igor.IgorService @@ -82,14 +83,14 @@ class MonitorAwsCodeBuildTaskSpec extends Specification { TaskResult result = task.execute(stage) then: - 1 * igorService.getAwsCodeBuildExecution(ACCOUNT, BUILD_ID) >> { throw stubRetrofitError() } + 1 * igorService.getAwsCodeBuildExecution(ACCOUNT, BUILD_ID) >> { throw stubNetworkError() } 0 * igorService._ result.getStatus() == ExecutionStatus.RUNNING } - def stubRetrofitError() { - return Stub(RetrofitError) { + def stubNetworkError() { + return new SpinnakerNetworkException(Stub(RetrofitError) { getKind() >> RetrofitError.Kind.NETWORK - } + }) } } diff --git a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorGoogleCloudBuildTaskSpec.groovy b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorGoogleCloudBuildTaskSpec.groovy index 68c7917d9a..74057951f6 100644 --- a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorGoogleCloudBuildTaskSpec.groovy +++ b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorGoogleCloudBuildTaskSpec.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.orca.igor.tasks +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.api.pipeline.TaskResult import com.netflix.spinnaker.orca.igor.IgorService @@ -85,14 +86,14 @@ class MonitorGoogleCloudBuildTaskSpec extends Specification { TaskResult result = task.execute(stage) then: - 1 * igorService.getGoogleCloudBuild(ACCOUNT, BUILD_ID) >> { throw stubRetrofitError() } + 1 * igorService.getGoogleCloudBuild(ACCOUNT, BUILD_ID) >> { throw stubNetworkError() } 0 * igorService._ result.getStatus() == ExecutionStatus.RUNNING } - def stubRetrofitError() { - return Stub(RetrofitError) { + def stubNetworkError() { + return new SpinnakerNetworkException(Stub(RetrofitError) { getKind() >> 
RetrofitError.Kind.NETWORK - } + }) } } diff --git a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorJenkinsJobTaskSpec.groovy b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorJenkinsJobTaskSpec.groovy index 3929879b3c..45323115f7 100644 --- a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorJenkinsJobTaskSpec.groovy +++ b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/MonitorJenkinsJobTaskSpec.groovy @@ -16,6 +16,8 @@ package com.netflix.spinnaker.orca.igor.tasks +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.clouddriver.pipeline.job.model.JobStatus import com.netflix.spinnaker.orca.igor.BuildService @@ -111,8 +113,10 @@ class MonitorJenkinsJobTaskSpec extends Specification { getResponse() >> new Response('', httpStatus, '', [], null) } + def httpException = new SpinnakerHttpException(exception) + task.buildService = Stub(BuildService) { - getBuild(stage.context.buildNumber, stage.context.master, stage.context.job) >> { throw exception } + getBuild(stage.context.buildNumber, stage.context.master, stage.context.job) >> { throw httpException } } when: @@ -120,12 +124,12 @@ class MonitorJenkinsJobTaskSpec extends Specification { def thrownException = null try { result = task.execute(stage) - } catch (RetrofitError e) { + } catch (SpinnakerHttpException e) { thrownException = e } then: - thrownException ? thrownException == exception : result.status == expectedExecutionStatus + thrownException ? thrownException == httpException : result.status == expectedExecutionStatus where: httpStatus || expectedExecutionStatus diff --git a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/RetryableIgorTaskSpec.groovy b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/RetryableIgorTaskSpec.groovy index 409def020b..a40e2431f6 100644 --- a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/RetryableIgorTaskSpec.groovy +++ b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/RetryableIgorTaskSpec.groovy @@ -16,6 +16,8 @@ package com.netflix.spinnaker.orca.igor.tasks +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.api.pipeline.TaskResult import com.netflix.spinnaker.orca.igor.model.RetryableStageDefinition @@ -51,7 +53,7 @@ class RetryableIgorTaskSpec extends Specification { def result = task.execute(stage) then: - 1 * task.tryExecute(jobRequest) >> { throw stubRetrofitError(500) } + 1 * task.tryExecute(jobRequest) >> { throw stubHttpError(500) } jobRequest.getConsecutiveErrors() >> 0 result.status == ExecutionStatus.RUNNING result.context.get("consecutiveErrors") == 1 @@ -62,7 +64,7 @@ class RetryableIgorTaskSpec extends Specification { def result = task.execute(stage) then: - 1 * task.tryExecute(jobRequest) >> { throw stubRetrofitNetworkError() } + 1 * task.tryExecute(jobRequest) >> { throw stubNetworkError() } jobRequest.getConsecutiveErrors() >> 0 result.status == ExecutionStatus.RUNNING result.context.get("consecutiveErrors") == 1 @@ -73,9 +75,9 @@ class RetryableIgorTaskSpec extends Specification { def result = task.execute(stage) then: - 1 * 
task.tryExecute(jobRequest) >> { throw stubRetrofitError(404) } + 1 * task.tryExecute(jobRequest) >> { throw stubHttpError(404) } jobRequest.getConsecutiveErrors() >> 0 - thrown RetrofitError + thrown SpinnakerHttpException } def "should propagate the error we have reached the retry limit"() { @@ -83,9 +85,9 @@ class RetryableIgorTaskSpec extends Specification { def result = task.execute(stage) then: - 1 * task.tryExecute(jobRequest) >> { throw stubRetrofitError(500) } + 1 * task.tryExecute(jobRequest) >> { throw stubHttpError(500) } jobRequest.getConsecutiveErrors() >> 5 - thrown RetrofitError + thrown SpinnakerHttpException } def "should propagate a non-successful task status"() { @@ -109,16 +111,16 @@ class RetryableIgorTaskSpec extends Specification { result.context.get("consecutiveErrors") == 0 } - def stubRetrofitError(int statusCode) { - return Stub(RetrofitError) { + def stubHttpError(int statusCode) { + return new SpinnakerHttpException(Stub(RetrofitError) { getKind() >> RetrofitError.Kind.HTTP getResponse() >> new Response("", statusCode, "", Collections.emptyList(), null) - } + }) } - def stubRetrofitNetworkError() { - return Stub(RetrofitError) { + def stubNetworkError() { + return new SpinnakerNetworkException(Stub(RetrofitError) { getKind() >> RetrofitError.Kind.NETWORK - } + }) } } diff --git a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/StartJenkinsJobTaskSpec.groovy b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/StartJenkinsJobTaskSpec.groovy index 6aa397e5d9..a49f2f7f48 100644 --- a/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/StartJenkinsJobTaskSpec.groovy +++ b/orca-igor/src/test/groovy/com/netflix/spinnaker/orca/igor/tasks/StartJenkinsJobTaskSpec.groovy @@ -17,11 +17,12 @@ package com.netflix.spinnaker.orca.igor.tasks import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.igor.BuildService import com.netflix.spinnaker.orca.pipeline.model.PipelineExecutionImpl import com.netflix.spinnaker.orca.pipeline.model.StageExecutionImpl -import com.netflix.spinnaker.orca.retrofit.exceptions.RetrofitExceptionHandler +import com.netflix.spinnaker.orca.retrofit.exceptions.SpinnakerServerExceptionHandler import retrofit.RetrofitError import retrofit.client.Response import retrofit.mime.TypedString @@ -39,7 +40,7 @@ class StartJenkinsJobTaskSpec extends Specification { convertValue(_,_) >> [:] } - task.retrofitExceptionHandler = new RetrofitExceptionHandler() + task.spinnakerServerExceptionHandler = new SpinnakerServerExceptionHandler() } @Shared @@ -85,14 +86,14 @@ class StartJenkinsJobTaskSpec extends Specification { and: task.buildService = Stub(BuildService) { - build(stage.context.master, stage.context.job, stage.context.parameters, stage.startTime.toString()) >> {throw RetrofitError.unexpectedError("http://test", new RuntimeException())} + build(stage.context.master, stage.context.job, stage.context.parameters, stage.startTime.toString()) >> {throw new SpinnakerServerException(RetrofitError.unexpectedError("http://test", new RuntimeException()))} } when: task.execute(stage) then: - thrown(RetrofitError) + thrown(SpinnakerServerException) } def "handle 202 response from igor"() { diff --git a/orca-integrations-gremlin/orca-integrations-gremlin.gradle b/orca-integrations-gremlin/orca-integrations-gremlin.gradle index eebff9edb0..a1d746dc08 100644 
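// A minimal, illustrative sketch of the wiring pattern the Gradle and configuration changes
// below apply: each retrofit1 RestAdapter registers SpinnakerRetrofitErrorHandler so client
// calls surface Spinnaker*Exception types instead of raw RetrofitError. The endpoint and the
// ExampleService interface are placeholders for illustration, not part of this change.
import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler;
import retrofit.RestAdapter;
import retrofit.client.OkClient;
import retrofit.converter.JacksonConverter;

class ExampleServiceConfiguration {
  interface ExampleService {} // placeholder retrofit interface

  ExampleService exampleService() {
    return new RestAdapter.Builder()
        .setEndpoint("http://example-service") // placeholder endpoint
        .setClient(new OkClient())
        .setConverter(new JacksonConverter(new ObjectMapper()))
        .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) // errors thrown as Spinnaker*Exception
        .setLogLevel(RestAdapter.LogLevel.BASIC)
        .build()
        .create(ExampleService.class);
  }
}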
--- a/orca-integrations-gremlin/orca-integrations-gremlin.gradle +++ b/orca-integrations-gremlin/orca-integrations-gremlin.gradle @@ -6,6 +6,7 @@ dependencies { implementation(project(":orca-retrofit")) implementation("org.springframework.boot:spring-boot-autoconfigure") implementation("org.slf4j:slf4j-api") + implementation("io.spinnaker.kork:kork-retrofit") testImplementation(project(":orca-core-tck")) } diff --git a/orca-integrations-gremlin/src/main/java/com/netflix/spinnaker/orca/config/GremlinConfiguration.kt b/orca-integrations-gremlin/src/main/java/com/netflix/spinnaker/orca/config/GremlinConfiguration.kt index febce12931..b9adb02ac5 100644 --- a/orca-integrations-gremlin/src/main/java/com/netflix/spinnaker/orca/config/GremlinConfiguration.kt +++ b/orca-integrations-gremlin/src/main/java/com/netflix/spinnaker/orca/config/GremlinConfiguration.kt @@ -2,6 +2,7 @@ package com.netflix.spinnaker.orca.config import com.fasterxml.jackson.databind.PropertyNamingStrategy import com.fasterxml.jackson.databind.SerializationFeature +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler import com.netflix.spinnaker.orca.gremlin.GremlinConverter import com.netflix.spinnaker.orca.gremlin.GremlinService import com.netflix.spinnaker.orca.jackson.OrcaObjectMapper @@ -47,6 +48,7 @@ class GremlinConfiguration { .setEndpoint(gremlinEndpoint) .setClient(retrofitClient) .setLogLevel(RestAdapter.LogLevel.BASIC) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) .setLog(RetrofitSlf4jLog(GremlinService::class.java)) .setConverter(GremlinConverter(mapper)) .build() diff --git a/orca-kayenta/orca-kayenta.gradle b/orca-kayenta/orca-kayenta.gradle index a7a073c840..2514a1ea17 100644 --- a/orca-kayenta/orca-kayenta.gradle +++ b/orca-kayenta/orca-kayenta.gradle @@ -27,6 +27,7 @@ dependencies { implementation("com.netflix.frigga:frigga") implementation("io.spinnaker.kork:kork-moniker") implementation("com.fasterxml.jackson.module:jackson-module-kotlin") + implementation("io.spinnaker.kork:kork-retrofit") testImplementation(project(":orca-test-kotlin")) testImplementation("com.github.tomakehurst:wiremock-jre8-standalone") diff --git a/orca-kayenta/src/main/kotlin/com/netflix/spinnaker/orca/kayenta/config/KayentaConfiguration.kt b/orca-kayenta/src/main/kotlin/com/netflix/spinnaker/orca/kayenta/config/KayentaConfiguration.kt index 45b516d07b..588b90695f 100644 --- a/orca-kayenta/src/main/kotlin/com/netflix/spinnaker/orca/kayenta/config/KayentaConfiguration.kt +++ b/orca-kayenta/src/main/kotlin/com/netflix/spinnaker/orca/kayenta/config/KayentaConfiguration.kt @@ -18,6 +18,7 @@ package com.netflix.spinnaker.orca.kayenta.config import com.fasterxml.jackson.databind.SerializationFeature.WRITE_DATES_AS_TIMESTAMPS import com.netflix.spinnaker.kork.api.expressions.ExpressionFunctionProvider +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler import com.netflix.spinnaker.orca.jackson.OrcaObjectMapper import com.netflix.spinnaker.orca.kayenta.KayentaService import com.netflix.spinnaker.orca.retrofit.RetrofitConfiguration @@ -77,6 +78,7 @@ class KayentaConfiguration { .setLogLevel(retrofitLogLevel) .setLog(RetrofitSlf4jLog(KayentaService::class.java)) .setConverter(JacksonConverter(mapper)) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) .build() .create(KayentaService::class.java) } diff --git a/orca-keel/orca-keel.gradle b/orca-keel/orca-keel.gradle index d770738610..e1e361a710 100644 --- a/orca-keel/orca-keel.gradle +++ 
b/orca-keel/orca-keel.gradle @@ -24,6 +24,7 @@ dependencies { implementation("com.fasterxml.jackson.module:jackson-module-kotlin") implementation("org.springframework:spring-web") implementation("org.springframework.boot:spring-boot-autoconfigure") + implementation("io.spinnaker.kork:kork-retrofit") testImplementation("com.fasterxml.jackson.module:jackson-module-kotlin") testImplementation("dev.minutest:minutest") @@ -33,6 +34,8 @@ dependencies { testImplementation("org.codehaus.groovy:groovy") testImplementation("org.junit.jupiter:junit-jupiter-api") testImplementation("org.junit.jupiter:junit-jupiter-params") + testImplementation("com.github.tomakehurst:wiremock-jre8-standalone") + testImplementation("org.mockito:mockito-junit-jupiter") } test { diff --git a/orca-keel/src/main/kotlin/com/netflix/spinnaker/orca/config/KeelConfiguration.kt b/orca-keel/src/main/kotlin/com/netflix/spinnaker/orca/config/KeelConfiguration.kt index 9bc4a53b72..09924c8298 100644 --- a/orca-keel/src/main/kotlin/com/netflix/spinnaker/orca/config/KeelConfiguration.kt +++ b/orca-keel/src/main/kotlin/com/netflix/spinnaker/orca/config/KeelConfiguration.kt @@ -22,6 +22,7 @@ import com.fasterxml.jackson.module.kotlin.KotlinModule import com.jakewharton.retrofit.Ok3Client import com.netflix.spinnaker.config.DefaultServiceEndpoint import com.netflix.spinnaker.config.okhttp3.OkHttpClientProvider +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler import com.netflix.spinnaker.orca.KeelService import com.netflix.spinnaker.orca.jackson.OrcaObjectMapper import org.springframework.beans.factory.annotation.Value @@ -60,6 +61,7 @@ class KeelConfiguration { .setEndpoint(keelEndpoint) .setClient(Ok3Client(clientProvider.getClient(DefaultServiceEndpoint("keel", keelEndpoint.url)))) .setLogLevel(retrofitLogLevel) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) .setConverter(JacksonConverter(keelObjectMapper)) .build() .create(KeelService::class.java) diff --git a/orca-keel/src/main/kotlin/com/netflix/spinnaker/orca/keel/task/ImportDeliveryConfigTask.kt b/orca-keel/src/main/kotlin/com/netflix/spinnaker/orca/keel/task/ImportDeliveryConfigTask.kt index 31cfc13070..186d8fb547 100644 --- a/orca-keel/src/main/kotlin/com/netflix/spinnaker/orca/keel/task/ImportDeliveryConfigTask.kt +++ b/orca-keel/src/main/kotlin/com/netflix/spinnaker/orca/keel/task/ImportDeliveryConfigTask.kt @@ -18,7 +18,9 @@ package com.netflix.spinnaker.orca.keel.task import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.module.kotlin.convertValue -import com.fasterxml.jackson.module.kotlin.readValue +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import com.netflix.spinnaker.kork.web.exceptions.InvalidRequestException import com.netflix.spinnaker.orca.KeelService import com.netflix.spinnaker.orca.api.pipeline.RetryableTask @@ -37,7 +39,6 @@ import java.time.Instant import java.util.concurrent.TimeUnit import org.slf4j.LoggerFactory import org.springframework.stereotype.Component -import retrofit.RetrofitError /** * Task that retrieves a Managed Delivery config manifest from source control via igor, then publishes it to keel, @@ -75,7 +76,7 @@ constructor( keelService.publishDeliveryConfig(deliveryConfig) TaskResult.builder(ExecutionStatus.SUCCEEDED).context(emptyMap()).build() - } catch (e: RetrofitError) { + 
} catch (e: SpinnakerServerException) { handleRetryableFailures(e, context) } catch (e: Exception) { log.error("Unexpected exception while executing {}, aborting.", javaClass.simpleName, e) @@ -153,35 +154,56 @@ constructor( ?: ""}/${context.manifest}@${context.ref}" } - /** - * Handle (potentially) retryable failures by looking at the retrofit error type or HTTP status code. A few 40x errors - * are handled as special cases to provide more friendly error messages to the UI. - */ - private fun handleRetryableFailures(error: RetrofitError, context: ImportDeliveryConfigContext): TaskResult { + /* + * Handle (potentially) all Spinnaker*Exception. Smart casts to the respective type on Http error and/or Network error. + * @return default error message on non-http and non-network errors. + * */ + private fun handleRetryableFailures(error: SpinnakerServerException, context: ImportDeliveryConfigContext): TaskResult { return when { - error.kind == RetrofitError.Kind.NETWORK -> { + error is SpinnakerNetworkException -> { // retry if unable to connect buildRetry( context, - "Network error talking to downstream service, attempt ${context.attempt} of ${context.maxRetries}: ${error.friendlyMessage}" + "Network error talking to downstream service, attempt ${context.attempt} of ${context.maxRetries}: ${error.networkErrorMessage}" ) } - error.response?.status in 400..499 -> { - val response = error.response!! + error is SpinnakerHttpException -> { + handleRetryableFailures(error, context) + } else -> { + buildRetry( + context, + "Server error talking to downstream service, attempt ${context.attempt} of ${context.maxRetries}: ${error.serverErrorMessage}" + ) + } + } + } + + /** + * Handle (potentially) retryable failures by looking at the HTTP status code. A few 4xx errors + * are handled as special cases to provide more friendly error messages to the UI. + */ + private fun handleRetryableFailures(httpException: SpinnakerHttpException, context: ImportDeliveryConfigContext): TaskResult{ + return when { + httpException.responseCode in 400..499 -> { + val responseBody = httpException.responseBody // just give up on 4xx errors, which are unlikely to resolve with retries, but give users a hint about 401 // errors from igor/scm, and attempt to parse keel errors (which are typically more informative) buildError( - if (error.fromIgor && response.status == 401) { + if (httpException.fromIgor && httpException.responseCode == 401) { UNAUTHORIZED_SCM_ACCESS_MESSAGE - } else if (error.fromKeel && response.body.length() > 0) { - // keel's errors should use the standard Spring format, so we try to parse them + } else if (httpException.fromKeel && responseBody!=null && responseBody.isNotEmpty()) { + // keel's errors should use the standard Spring format try { - objectMapper.readValue(response.body.`in`()) + if (responseBody["timestamp"] !=null) { + SpringHttpError(responseBody["error"] as String, responseBody["status"] as Int, responseBody["message"] as? String, Instant.ofEpochMilli(responseBody["timestamp"] as Long), responseBody["details"] as? Map) + } else { + SpringHttpError(error = responseBody["error"] as String, status = responseBody["status"] as Int, message = responseBody["message"] as? String, details = responseBody["details"] as? 
Map) + } } catch (_: Exception) { - "Non-retryable HTTP response ${error.response?.status} received from downstream service: ${error.friendlyMessage}" + "Non-retryable HTTP response ${httpException.responseCode} received from downstream service: ${httpException.httpErrorMessage}" } } else { - "Non-retryable HTTP response ${error.response?.status} received from downstream service: ${error.friendlyMessage}" + "Non-retryable HTTP response ${httpException.responseCode} received from downstream service: ${httpException.httpErrorMessage}" } ) } @@ -189,7 +211,7 @@ constructor( // retry on other status codes buildRetry( context, - "Retryable HTTP response ${error.response?.status} received from downstream service: ${error.friendlyMessage}" + "Retryable HTTP response ${httpException.responseCode} received from downstream service: ${httpException.httpErrorMessage}" ) } } @@ -233,20 +255,28 @@ constructor( override fun getTimeout() = TimeUnit.SECONDS.toMillis(180) - val RetrofitError.friendlyMessage: String - get() = if (kind == RetrofitError.Kind.HTTP) { - "HTTP ${response.status} ${response.url}: ${cause?.message ?: message}" - } else { - "$message: ${cause?.message ?: ""}" + val SpinnakerHttpException.httpErrorMessage: String + get() { + return "HTTP ${responseCode} ${url}: ${cause?.message ?: message}" + } + + val SpinnakerNetworkException.networkErrorMessage: String + get() { + return "$message: ${cause?.message ?: ""}" + } + + val SpinnakerServerException.serverErrorMessage: String + get() { + return "$message" } - val RetrofitError.fromIgor: Boolean + val SpinnakerServerException.fromIgor: Boolean get() { val parsedUrl = URL(url) return parsedUrl.host.contains("igor") || parsedUrl.port == 8085 } - val RetrofitError.fromKeel: Boolean + val SpinnakerServerException.fromKeel: Boolean get() { val parsedUrl = URL(url) return parsedUrl.host.contains("keel") || parsedUrl.port == 8087 @@ -271,7 +301,7 @@ constructor( val error: String, val status: Int, val message: String? = error, - val timestamp: Instant = Instant.now(), + val timestamp: Instant? = Instant.now(), val details: Map? = null // this is keel-specific ) diff --git a/orca-keel/src/test/java/com/netflix/spinnaker/orca/keel/task/ImportDeliveryConfigTaskTest.java b/orca-keel/src/test/java/com/netflix/spinnaker/orca/keel/task/ImportDeliveryConfigTaskTest.java new file mode 100644 index 0000000000..f1b6aa5251 --- /dev/null +++ b/orca-keel/src/test/java/com/netflix/spinnaker/orca/keel/task/ImportDeliveryConfigTaskTest.java @@ -0,0 +1,455 @@ +/* + * Copyright 2024 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.orca.keel.task; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import static com.netflix.spinnaker.orca.api.pipeline.models.ExecutionType.PIPELINE; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Named.named; +import static org.junit.jupiter.params.provider.Arguments.arguments; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.core.JsonProcessingException; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.tomakehurst.wiremock.client.WireMock; +import com.github.tomakehurst.wiremock.http.Fault; +import com.github.tomakehurst.wiremock.http.HttpHeaders; +import com.github.tomakehurst.wiremock.junit5.WireMockExtension; +import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo; +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler; +import com.netflix.spinnaker.okhttp.SpinnakerRequestInterceptor; +import com.netflix.spinnaker.orca.KeelService; +import com.netflix.spinnaker.orca.api.pipeline.TaskResult; +import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus; +import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionType; +import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution; +import com.netflix.spinnaker.orca.config.KeelConfiguration; +import com.netflix.spinnaker.orca.igor.ScmService; +import com.netflix.spinnaker.orca.pipeline.model.PipelineExecutionImpl; +import com.netflix.spinnaker.orca.pipeline.model.StageExecutionImpl; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.stream.Stream; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import org.springframework.http.HttpStatus; +import retrofit.RestAdapter; +import retrofit.RetrofitError; +import retrofit.client.OkClient; +import retrofit.converter.JacksonConverter; + +/* + * @see com.netflix.spinnaker.orca.keel.ImportDeliveryConfigTaskTests.kt already covers up few tests related to @see ImportDeliveryConfigTask. + * This new java class is Introduced to improvise the API testing with the help of wiremock. + * Test using wiremock would help in smooth migration to retrofit2.x along with the addition of {@link SpinnakerRetrofitErrorHandler}. 
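+ * For example, the tests below stub POST /delivery-configs/ with WireMock: a 4xx response whose
+ * JSON body follows the Spring error format is parsed back into {@link
+ * ImportDeliveryConfigTask.SpringHttpError} and surfaced in the task result context, while 5xx
+ * responses and connection faults produce a RUNNING result so the task retries.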
+ * */ +public class ImportDeliveryConfigTaskTest { + + private static KeelService keelService; + private static ScmService scmService; + + private static final ObjectMapper objectMapper = new KeelConfiguration().keelObjectMapper(); + + private StageExecution stage; + + private ImportDeliveryConfigTask importDeliveryConfigTask; + + private Map contextMap = new LinkedHashMap<>(); + + private static final int keelPort = 8087; + + @BeforeAll + static void setupOnce(WireMockRuntimeInfo wmRuntimeInfo) { + OkClient okClient = new OkClient(); + RestAdapter.LogLevel retrofitLogLevel = RestAdapter.LogLevel.NONE; + + keelService = + new RestAdapter.Builder() + .setRequestInterceptor(new SpinnakerRequestInterceptor(true)) + .setEndpoint(wmRuntimeInfo.getHttpBaseUrl()) + .setClient(okClient) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) + .setLogLevel(retrofitLogLevel) + .setConverter(new JacksonConverter(objectMapper)) + .build() + .create(KeelService.class); + } + + @BeforeEach + public void setup() { + scmService = mock(ScmService.class); + importDeliveryConfigTask = new ImportDeliveryConfigTask(keelService, scmService, objectMapper); + + PipelineExecutionImpl pipeline = new PipelineExecutionImpl(PIPELINE, "keeldemo"); + contextMap.put("repoType", "stash"); + contextMap.put("projectKey", "SPKR"); + contextMap.put("repositorySlug", "keeldemo"); + contextMap.put("directory", "."); + contextMap.put("manifest", "spinnaker.yml"); + contextMap.put("ref", "refs/heads/master"); + contextMap.put("attempt", 1); + contextMap.put("maxRetries", 5); + + stage = new StageExecutionImpl(pipeline, ExecutionType.PIPELINE.toString(), contextMap); + } + + @RegisterExtension + static WireMockExtension wireMock = + WireMockExtension.newInstance().options(wireMockConfig().port(keelPort)).build(); + + private static void simulateFault(String url, String body, HttpStatus httpStatus) { + wireMock.givenThat( + WireMock.post(urlPathEqualTo(url)) + .willReturn( + aResponse() + .withHeaders(HttpHeaders.noHeaders()) + .withStatus(httpStatus.value()) + .withBody(body))); + } + + private static void simulateFault(String url, Fault fault) { + wireMock.givenThat(WireMock.post(urlPathEqualTo(url)).willReturn(aResponse().withFault(fault))); + } + + /** + * This test is a positive case which verifies if the task returns {@link + * ImportDeliveryConfigTask.SpringHttpError} on 4xx http error. Here the error body is mocked with + * timestamps in supported Time Units, which will be parsed to exact same timestamp in the + * method @see {@link ImportDeliveryConfigTask#handleRetryableFailures(RetrofitError, + * ImportDeliveryConfigTask.ImportDeliveryConfigContext)} and results in successful assertions of + * all the fields. + * + *
The cases when the timestamp results in accurate value, are when the units in : {@link + * ChronoUnit#MILLIS} {@link ChronoUnit#SECONDS} {@link ChronoUnit#DAYS} {@link ChronoUnit#HOURS} + * {@link ChronoUnit#HALF_DAYS} {@link ChronoUnit#MINUTES} + */ + @ParameterizedTest(name = "{0}") + @MethodSource("parameterizePositiveHttpErrorScenario") + public void verifyPositiveHttpErrorScenarios( + HttpStatus httpStatus, ImportDeliveryConfigTask.SpringHttpError httpError) + throws JsonProcessingException { + + TaskResult expectedTaskResult = + TaskResult.builder(ExecutionStatus.TERMINAL).context(Map.of("error", httpError)).build(); + + // simulate SpringHttpError with http error status code + simulateFault("/delivery-configs/", objectMapper.writeValueAsString(httpError), httpStatus); + + getDeliveryConfigManifest(); + + var result = importDeliveryConfigTask.execute(stage); + + verifyGetDeliveryConfigManifestInvocations(); + + assertThat(expectedTaskResult).isEqualTo(result); + } + + /** + * This test is a negative case which verifies if the task returns {@link + * ImportDeliveryConfigTask.SpringHttpError} on 4xx http error. Here the error body is mocked with + * timestamp in Time Units that are unsupported, which WILL NOT be parsed to exact timestamp in + * the method @see {@link ImportDeliveryConfigTask#handleRetryableFailures(RetrofitError, + * ImportDeliveryConfigTask.ImportDeliveryConfigContext)} and results in will contain incorrect + * timestamp. + * + *
The cases where the timestamp will result in incorrect value, are when the units in : {@link + * ChronoUnit#NANOS} {@link ChronoUnit#MICROS} + */ + @ParameterizedTest(name = "{0}") + @MethodSource("parameterizeNegativeHttpErrorScenario") + public void verifyNegativeHttpErrorScenarios( + HttpStatus httpStatus, ImportDeliveryConfigTask.SpringHttpError httpError) + throws JsonProcessingException { + + // simulate SpringHttpError with http error status code + simulateFault("/delivery-configs/", objectMapper.writeValueAsString(httpError), httpStatus); + + getDeliveryConfigManifest(); + + var result = importDeliveryConfigTask.execute(stage); + ImportDeliveryConfigTask.SpringHttpError actualHttpErrorBody = + (ImportDeliveryConfigTask.SpringHttpError) result.getContext().get("error"); + + verifyGetDeliveryConfigManifestInvocations(); + + // assert all the values in the http error body are true except the timestamp in nanos + assertThat(actualHttpErrorBody.getStatus()).isEqualTo(httpStatus.value()); + assertThat(actualHttpErrorBody.getError()).isEqualTo(httpStatus.getReasonPhrase()); + assertThat(actualHttpErrorBody.getMessage()).isEqualTo(httpStatus.name()); + assertThat(actualHttpErrorBody.getDetails()) + .isEqualTo(Map.of("exception", "Http Error occurred")); + assertThat(actualHttpErrorBody.getTimestamp().getEpochSecond()) + .isEqualTo(httpError.getTimestamp().getEpochSecond()); + assertThat(actualHttpErrorBody.getTimestamp().getNano()) + .isNotEqualTo(httpError.getTimestamp().getNano()); + } + + /** + * Test to verify when the response body doesn't have timestamp. Field will be initialized with + * default value {@link Instant#now} + */ + @Test + public void testSpringHttpErrorWithoutTimestamp() throws JsonProcessingException { + + var httpStatus = HttpStatus.BAD_REQUEST; + + // Map of SpringHttpError is initialized without timestamp + var httpError = mapOfSpringHttpError(httpStatus); + + // simulate SpringHttpError with http error status code + simulateFault("/delivery-configs/", objectMapper.writeValueAsString(httpError), httpStatus); + + getDeliveryConfigManifest(); + + var result = importDeliveryConfigTask.execute(stage); + ImportDeliveryConfigTask.SpringHttpError actualHttpErrorBody = + (ImportDeliveryConfigTask.SpringHttpError) result.getContext().get("error"); + + verifyGetDeliveryConfigManifestInvocations(); + + assertThat(actualHttpErrorBody.getStatus()).isEqualTo(httpStatus.value()); + assertThat(actualHttpErrorBody.getError()).isEqualTo(httpStatus.getReasonPhrase()); + assertThat(actualHttpErrorBody.getMessage()).isEqualTo(httpStatus.name()); + assertThat(actualHttpErrorBody.getDetails()) + .isEqualTo(Map.of("exception", "Http Error occurred")); + + // The timestamp field will have the current time, and hence only the instance type is verified + assertThat(actualHttpErrorBody.getTimestamp()).isInstanceOf(Instant.class); + } + + @Test + public void testTaskResultWhenErrorBodyIsEmpty() { + + String expectedMessage = + String.format( + "Non-retryable HTTP response %s received from downstream service: %s", + HttpStatus.BAD_REQUEST.value(), + "HTTP 400 " + + wireMock.baseUrl() + + "/delivery-configs/: Status: 400, URL: " + + wireMock.baseUrl() + + "/delivery-configs/, Message: Bad Request"); + + var errorMap = new HashMap<>(); + errorMap.put("message", expectedMessage); + + TaskResult terminal = + TaskResult.builder(ExecutionStatus.TERMINAL).context(Map.of("error", errorMap)).build(); + + // Simulate any 4xx http error with empty error response body + String emptyBody = ""; + 
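+    // With an empty 4xx body there is nothing to parse into SpringHttpError, so
+    // handleRetryableFailures falls back to the generic "Non-retryable HTTP response ..."
+    // message asserted above.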
simulateFault("/delivery-configs/", emptyBody, HttpStatus.BAD_REQUEST); + + getDeliveryConfigManifest(); + + var result = importDeliveryConfigTask.execute(stage); + + verifyGetDeliveryConfigManifestInvocations(); + + assertThat(result).isEqualTo(terminal); + } + + @Test + public void testTaskResultWhenHttp5xxErrorIsThrown() { + + contextMap.put("attempt", (Integer) contextMap.get("attempt") + 1); + contextMap.put( + "errorFromLastAttempt", + "Retryable HTTP response 500 received from downstream service: HTTP 500 " + + wireMock.baseUrl() + + "/delivery-configs/: Status: 500, URL: " + + wireMock.baseUrl() + + "/delivery-configs/, Message: Server Error"); + + TaskResult running = TaskResult.builder(ExecutionStatus.RUNNING).context(contextMap).build(); + + // Simulate any 5xx http error with empty error response body + String emptyBody = ""; + simulateFault("/delivery-configs/", emptyBody, HttpStatus.INTERNAL_SERVER_ERROR); + + getDeliveryConfigManifest(); + + var result = importDeliveryConfigTask.execute(stage); + + verifyGetDeliveryConfigManifestInvocations(); + + assertThat(result).isEqualTo(running); + } + + @Test + public void testTaskResultWhenAPIFailsWithNetworkError() { + + contextMap.put("attempt", (Integer) contextMap.get("attempt") + 1); + contextMap.put( + "errorFromLastAttempt", + String.format( + "Network error talking to downstream service, attempt 1 of %s: Connection reset: Connection reset", + contextMap.get("maxRetries"))); + + TaskResult running = TaskResult.builder(ExecutionStatus.RUNNING).context(contextMap).build(); + + // Simulate network failure + simulateFault("/delivery-configs/", Fault.CONNECTION_RESET_BY_PEER); + + getDeliveryConfigManifest(); + + var result = importDeliveryConfigTask.execute(stage); + + verifyGetDeliveryConfigManifestInvocations(); + + assertThat(result).isEqualTo(running); + } + + private static Stream parameterizePositiveHttpErrorScenario() { + + HttpStatus httpStatus = HttpStatus.BAD_REQUEST; + + // Initialize SpringHttpError with timestamp in milliseconds and HttpStatus 400 bad request. + var httpErrorTimestampInMillis = + makeSpringHttpError(httpStatus, Instant.now().truncatedTo(ChronoUnit.MILLIS)); + + // Initialize SpringHttpError with timestamp in seconds and HttpStatus 400 bad request. + var httpErrorTimestampInSeconds = + makeSpringHttpError(httpStatus, Instant.now().truncatedTo(ChronoUnit.SECONDS)); + + // Initialize SpringHttpError with timestamp in minutes and HttpStatus 400 bad request. + var httpErrorTimestampInMinutes = + makeSpringHttpError(httpStatus, Instant.now().truncatedTo(ChronoUnit.MINUTES)); + + // Initialize SpringHttpError with timestamp in hours and HttpStatus 400 bad request. + var httpErrorTimestampInHours = + makeSpringHttpError(httpStatus, Instant.now().truncatedTo(ChronoUnit.HOURS)); + + // Initialize SpringHttpError with timestamp in days and HttpStatus 400 bad request. + var httpErrorTimestampInDays = + makeSpringHttpError(httpStatus, Instant.now().truncatedTo(ChronoUnit.DAYS)); + + // Initialize SpringHttpError with timestamp in half days and HttpStatus 400 bad request. 
+ var httpErrorTimestampInHalfDays = + makeSpringHttpError(httpStatus, Instant.now().truncatedTo(ChronoUnit.HALF_DAYS)); + + return Stream.of( + arguments( + named("http error with timestamp in " + ChronoUnit.MILLIS.name(), httpStatus), + httpErrorTimestampInMillis), + arguments( + named("http error with timestamp in " + ChronoUnit.SECONDS.name(), httpStatus), + httpErrorTimestampInSeconds), + arguments( + named("http error with timestamp in " + ChronoUnit.MINUTES.name(), httpStatus), + httpErrorTimestampInMinutes), + arguments( + named("http error with timestamp in " + ChronoUnit.HOURS.name(), httpStatus), + httpErrorTimestampInHours), + arguments( + named("http error with timestamp in " + ChronoUnit.DAYS.name(), httpStatus), + httpErrorTimestampInDays), + arguments( + named("http error with timestamp in " + ChronoUnit.HALF_DAYS.name(), httpStatus), + httpErrorTimestampInHalfDays)); + } + + private static Stream parameterizeNegativeHttpErrorScenario() { + + HttpStatus httpStatus = HttpStatus.BAD_REQUEST; + + // Initialize SpringHttpError with timestamp in milliseconds and HttpStatus 400 bad request. + var httpErrorTimestampInNanos = + makeSpringHttpError(httpStatus, Instant.now().truncatedTo(ChronoUnit.NANOS)); + + // Initialize SpringHttpError with timestamp in seconds and HttpStatus 400 bad request. + var httpErrorTimestampInMicros = + makeSpringHttpError(httpStatus, Instant.now().truncatedTo(ChronoUnit.MICROS)); + + return Stream.of( + arguments( + named("http error with timestamp in " + ChronoUnit.NANOS.name(), httpStatus), + httpErrorTimestampInNanos), + arguments( + named("http error with timestamp in " + ChronoUnit.MICROS.name(), httpStatus), + httpErrorTimestampInMicros)); + } + + private void getDeliveryConfigManifest() { + when(scmService.getDeliveryConfigManifest( + (String) contextMap.get("repoType"), + (String) contextMap.get("projectKey"), + (String) contextMap.get("repositorySlug"), + (String) contextMap.get("directory"), + (String) contextMap.get("manifest"), + (String) contextMap.get("ref"))) + .thenReturn( + Map.of( + "name", + "keeldemo-manifest", + "application", + "keeldemo", + "artifacts", + Collections.emptySet(), + "environments", + Collections.emptySet())); + } + + private static ImportDeliveryConfigTask.SpringHttpError makeSpringHttpError( + HttpStatus httpStatus, Instant timestamp) { + + return new ImportDeliveryConfigTask.SpringHttpError( + httpStatus.getReasonPhrase(), + httpStatus.value(), + httpStatus.name(), + timestamp, + Map.of("exception", "Http Error occurred")); + } + + private static Map mapOfSpringHttpError(HttpStatus httpStatus) { + + return Map.of( + "error", + httpStatus.getReasonPhrase(), + "status", + httpStatus.value(), + "message", + httpStatus.name(), + "details", + Map.of("exception", "Http Error occurred")); + } + + private void verifyGetDeliveryConfigManifestInvocations() { + verify(scmService, times(1)) + .getDeliveryConfigManifest( + (String) contextMap.get("repoType"), + (String) contextMap.get("projectKey"), + (String) contextMap.get("repositorySlug"), + (String) contextMap.get("directory"), + (String) contextMap.get("manifest"), + (String) contextMap.get("ref")); + } +} diff --git a/orca-keel/src/test/kotlin/com/netflix/spinnaker/orca/keel/ImportDeliveryConfigTaskTests.kt b/orca-keel/src/test/kotlin/com/netflix/spinnaker/orca/keel/ImportDeliveryConfigTaskTests.kt index 0104af4c42..4caba7797b 100644 --- a/orca-keel/src/test/kotlin/com/netflix/spinnaker/orca/keel/ImportDeliveryConfigTaskTests.kt +++ 
b/orca-keel/src/test/kotlin/com/netflix/spinnaker/orca/keel/ImportDeliveryConfigTaskTests.kt @@ -18,6 +18,7 @@ package com.netflix.spinnaker.orca.keel import com.fasterxml.jackson.databind.ObjectMapper import com.fasterxml.jackson.module.kotlin.convertValue +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException import com.netflix.spinnaker.kork.web.exceptions.InvalidRequestException import com.netflix.spinnaker.orca.KeelService import com.netflix.spinnaker.orca.api.pipeline.TaskResult @@ -30,7 +31,6 @@ import com.netflix.spinnaker.orca.igor.ScmService import com.netflix.spinnaker.orca.keel.model.DeliveryConfig import com.netflix.spinnaker.orca.keel.task.ImportDeliveryConfigTask import com.netflix.spinnaker.orca.keel.task.ImportDeliveryConfigTask.Companion.UNAUTHORIZED_SCM_ACCESS_MESSAGE -import com.netflix.spinnaker.orca.keel.task.ImportDeliveryConfigTask.SpringHttpError import com.netflix.spinnaker.orca.pipeline.model.DefaultTrigger import com.netflix.spinnaker.orca.pipeline.model.GitTrigger import com.netflix.spinnaker.orca.pipeline.model.PipelineExecutionImpl @@ -41,21 +41,14 @@ import io.mockk.every import io.mockk.mockk import io.mockk.slot import io.mockk.verify -import org.springframework.http.HttpStatus.BAD_REQUEST -import org.springframework.http.HttpStatus.FORBIDDEN import retrofit.RetrofitError import retrofit.client.Response -import retrofit.converter.JacksonConverter -import retrofit.mime.TypedInput import strikt.api.expectThat import strikt.api.expectThrows import strikt.assertions.contains -import strikt.assertions.isA import strikt.assertions.isEqualTo import strikt.assertions.isNotEqualTo import strikt.assertions.isNotNull -import java.time.Instant -import java.time.temporal.ChronoUnit internal class ImportDeliveryConfigTaskTests : JUnit5Minutests { data class ManifestLocation( @@ -111,34 +104,6 @@ internal class ImportDeliveryConfigTaskTests : JUnit5Minutests { context ) ) - - val parsingError = SpringHttpError( - status = BAD_REQUEST.value(), - error = BAD_REQUEST.reasonPhrase, - message = "Parsing error", - details = mapOf( - "message" to "Parsing error", - "path" to listOf( - mapOf( - "type" to "SomeClass", - "field" to "someField" - ) - ), - "pathExpression" to ".someField" - ), - // Jackson writes this as ms-since-epoch, so we need to strip off the nanoseconds, since we'll - // be round-tripping it through Jackson before testing for equality. - timestamp = Instant.now().truncatedTo(ChronoUnit.MILLIS) - ) - - val accessDeniedError = SpringHttpError( - status = FORBIDDEN.value(), - error = FORBIDDEN.reasonPhrase, - message = "Access denied", - // Jackson writes this as ms-since-epoch, so we need to strip off the nanoseconds, since we'll - // be round-tripping it through Jackson before testing for equality. 
- timestamp = Instant.now().truncatedTo(ChronoUnit.MILLIS) - ) } private fun ManifestLocation.toMap() = @@ -324,11 +289,11 @@ internal class ImportDeliveryConfigTaskTests : JUnit5Minutests { manifestLocation.manifest, manifestLocation.ref ) - } throws RetrofitError.httpError( + } throws SpinnakerHttpException(RetrofitError.httpError( "http://igor", Response("http://igor", 404, "", emptyList(), null), null, null - ) + )) } } @@ -350,11 +315,11 @@ internal class ImportDeliveryConfigTaskTests : JUnit5Minutests { manifestLocation.manifest, manifestLocation.ref ) - } throws RetrofitError.httpError( + } throws SpinnakerHttpException(RetrofitError.httpError( "http://igor", Response("http://igor", 401, "", emptyList(), null), null, null - ) + )) } } @@ -365,66 +330,6 @@ internal class ImportDeliveryConfigTaskTests : JUnit5Minutests { } } - context("keel access denied error") { - modifyFixture { - with(scmService) { - every { - getDeliveryConfigManifest( - manifestLocation.repoType, - manifestLocation.projectKey, - manifestLocation.repositorySlug, - manifestLocation.directory, - manifestLocation.manifest, - manifestLocation.ref - ) - } throws RetrofitError.httpError( - "http://keel", - Response( - "http://keel", 403, "", emptyList(), - JacksonConverter(objectMapper).toBody(accessDeniedError) as TypedInput - ), - null, null - ) - } - } - - test("task fails and includes the error details returned by keel") { - val result = execute(manifestLocation.toMap()) - expectThat(result.status).isEqualTo(ExecutionStatus.TERMINAL) - expectThat(result.context["error"]).isA().isEqualTo(accessDeniedError) - } - } - - context("delivery config parsing error") { - modifyFixture { - with(scmService) { - every { - getDeliveryConfigManifest( - manifestLocation.repoType, - manifestLocation.projectKey, - manifestLocation.repositorySlug, - manifestLocation.directory, - manifestLocation.manifest, - manifestLocation.ref - ) - } throws RetrofitError.httpError( - "http://keel", - Response( - "http://keel", 400, "", emptyList(), - JacksonConverter(objectMapper).toBody(parsingError) as TypedInput - ), - null, null - ) - } - } - - test("task fails and includes the error details returned by keel") { - val result = execute(manifestLocation.toMap()) - expectThat(result.status).isEqualTo(ExecutionStatus.TERMINAL) - expectThat(result.context["error"]).isA().isEqualTo(parsingError) - } - } - context("retryable failure to call downstream services") { modifyFixture { with(scmService) { @@ -437,11 +342,11 @@ internal class ImportDeliveryConfigTaskTests : JUnit5Minutests { manifestLocation.manifest, manifestLocation.ref ) - } throws RetrofitError.httpError( + } throws SpinnakerHttpException(RetrofitError.httpError( "http://igor", Response("http://igor", 503, "", emptyList(), null), null, null - ) + )) } } diff --git a/orca-mine/orca-mine.gradle b/orca-mine/orca-mine.gradle index f384de8f14..b0b03e4010 100644 --- a/orca-mine/orca-mine.gradle +++ b/orca-mine/orca-mine.gradle @@ -24,6 +24,10 @@ dependencies { implementation("org.springframework:spring-context") implementation("org.springframework.boot:spring-boot-autoconfigure") implementation("com.netflix.frigga:frigga") + implementation("io.spinnaker.kork:kork-retrofit") + + testImplementation("org.junit.jupiter:junit-jupiter-params") + testImplementation("com.github.tomakehurst:wiremock-jre8-standalone") } sourceSets { diff --git a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/config/MineConfiguration.groovy 
b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/config/MineConfiguration.groovy index 94e03f5fbc..07b1500b03 100644 --- a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/config/MineConfiguration.groovy +++ b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/config/MineConfiguration.groovy @@ -17,6 +17,7 @@ package com.netflix.spinnaker.orca.mine.config import com.fasterxml.jackson.databind.ObjectMapper +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler import com.netflix.spinnaker.orca.mine.MineService import com.netflix.spinnaker.orca.retrofit.RetrofitConfiguration import com.netflix.spinnaker.orca.retrofit.logging.RetrofitSlf4jLog @@ -64,6 +65,7 @@ class MineConfiguration { .setClient(retrofitClient) .setLogLevel(retrofitLogLevel) .setLog(new RetrofitSlf4jLog(MineService)) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) .setConverter(new JacksonConverter(objectMapper)) .build() .create(MineService) diff --git a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/pipeline/AcaTaskStage.groovy b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/pipeline/AcaTaskStage.groovy index dca0c89356..73827cd471 100644 --- a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/pipeline/AcaTaskStage.groovy +++ b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/pipeline/AcaTaskStage.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.orca.mine.pipeline +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException import com.netflix.spinnaker.orca.api.pipeline.CancellableStage import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution import com.netflix.spinnaker.orca.mine.MineService @@ -27,12 +28,10 @@ import com.netflix.spinnaker.orca.api.pipeline.graph.TaskNode import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import retrofit.RetrofitError import javax.annotation.Nonnull import static org.springframework.http.HttpStatus.CONFLICT -import static retrofit.RetrofitError.Kind.HTTP @Slf4j @Component @@ -79,8 +78,8 @@ class AcaTaskStage implements StageDefinitionBuilder, CancellableStage { def cancelCanaryResults = mineService.cancelCanary(stage.context.canary.id as String, reason) log.info("Canceled canary in mine (canaryId: ${stage.context.canary.id}, stageId: ${stage.id}, executionId: ${stage.execution.id}): ${reason}") return cancelCanaryResults - } catch (RetrofitError e) { - if (e.kind == HTTP && e.response.status == CONFLICT.value()) { + } catch (SpinnakerHttpException e) { + if (e.responseCode == CONFLICT.value()) { log.info("Canary (canaryId: ${stage.context.canary.id}, stageId: ${stage.id}, executionId: ${stage.execution.id}) has already ended") return [:] } else { diff --git a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/DisableCanaryTask.groovy b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/DisableCanaryTask.groovy index a7ec625f9e..03912761dc 100644 --- a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/DisableCanaryTask.groovy +++ b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/DisableCanaryTask.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.orca.mine.tasks +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.api.pipeline.Task import 
com.netflix.spinnaker.orca.api.pipeline.models.StageExecution @@ -27,7 +28,6 @@ import com.netflix.spinnaker.orca.mine.MineService import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import retrofit.RetrofitError import javax.annotation.Nonnull @@ -49,7 +49,7 @@ class DisableCanaryTask implements CloudProviderAware, Task { unhealthy : true ]).build() } - } catch (RetrofitError e) { + } catch (SpinnakerServerException e) { log.error("Exception occurred while getting canary status with id {} from mine, continuing with disable", stage.context.canary.id, e) } diff --git a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/MonitorAcaTaskTask.groovy b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/MonitorAcaTaskTask.groovy index 3cd507d373..2b9f92a601 100644 --- a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/MonitorAcaTaskTask.groovy +++ b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/MonitorAcaTaskTask.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.orca.mine.tasks +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import com.netflix.spinnaker.security.AuthenticatedRequest import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution import java.util.concurrent.TimeUnit @@ -28,7 +29,6 @@ import com.netflix.spinnaker.orca.mine.MineService import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import retrofit.RetrofitError @Component @Slf4j @@ -51,7 +51,7 @@ class MonitorAcaTaskTask implements CloudProviderAware, OverridableTimeoutRetrya outputs << [ canary: canary ] - } catch (RetrofitError e) { + } catch (SpinnakerServerException e) { log.error("Exception occurred while getting canary with id ${context.canary.id} from mine service", e) return TaskResult.builder(ExecutionStatus.RUNNING).context(outputs).build() } diff --git a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/MonitorCanaryTask.groovy b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/MonitorCanaryTask.groovy index 18b4961022..d6d45ccc2e 100644 --- a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/MonitorCanaryTask.groovy +++ b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/MonitorCanaryTask.groovy @@ -16,6 +16,7 @@ package com.netflix.spinnaker.orca.mine.tasks +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution import java.util.concurrent.TimeUnit @@ -29,7 +30,6 @@ import com.netflix.spinnaker.orca.mine.MineService import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import retrofit.RetrofitError @Component @Slf4j @@ -59,7 +59,7 @@ class MonitorCanaryTask implements CloudProviderAware, OverridableTimeoutRetryab outputs << [ canary : mineService.getCanary(context.canary.id) ] - } catch (RetrofitError e) { + } catch (SpinnakerServerException e) { log.error("Exception occurred while getting canary with id ${context.canary.id} from mine service", e) return TaskResult.builder(ExecutionStatus.RUNNING).context(outputs).build() } diff --git a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/RegisterCanaryTask.groovy 
b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/RegisterCanaryTask.groovy index ac56935e2e..9228b8357b 100644 --- a/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/RegisterCanaryTask.groovy +++ b/orca-mine/src/main/groovy/com/netflix/spinnaker/orca/mine/tasks/RegisterCanaryTask.groovy @@ -16,6 +16,10 @@ package com.netflix.spinnaker.orca.mine.tasks +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerConversionException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import com.netflix.spinnaker.orca.api.pipeline.models.ExecutionStatus import com.netflix.spinnaker.orca.api.pipeline.Task import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution @@ -25,7 +29,6 @@ import com.netflix.spinnaker.orca.mine.pipeline.DeployCanaryStage import groovy.util.logging.Slf4j import org.springframework.beans.factory.annotation.Autowired import org.springframework.stereotype.Component -import retrofit.RetrofitError import retrofit.client.Response import javax.annotation.Nonnull @@ -59,20 +62,34 @@ class RegisterCanaryTask implements Task { if (response.status == 200 && response.body.mimeType().startsWith('text/plain')) { canaryId = response.body.in().text } - } catch (RetrofitError re) { + } catch (SpinnakerHttpException re) { def response = [:] try { - response = re.getBodyAs(Map) as Map + def responseBody = re.responseBody + response = responseBody!=null ? responseBody : response } catch (Exception e) { response.error = e.message } - response.status = re.response?.status - response.errorKind = re.kind + response.status = re.responseCode + response.errorKind = "HTTP" throw new IllegalStateException( "Unable to register canary (executionId: ${stage.execution.id}, stageId: ${stage.id} canary: ${c}), response: ${response}" ) + } catch(SpinnakerServerException e){ + def response = [:] + response.status = null + if (e instanceof SpinnakerNetworkException){ + response.errorKind = "NETWORK" + } else if(e instanceof SpinnakerConversionException) { + response.errorKind = "CONVERSION" + } else { + response.errorKind = "UNEXPECTED" + } + throw new IllegalStateException( + "Unable to register canary (executionId: ${stage.execution.id}, stageId: ${stage.id} canary: ${c}), response: ${response}" + ) } log.info("Registered Canary (executionId: ${stage.execution.id}, stageId: ${stage.id}, canaryId: ${canaryId})") diff --git a/orca-mine/src/test/java/com/netflix/spinnaker/orca/mine/tasks/RegisterCanaryTaskTest.java b/orca-mine/src/test/java/com/netflix/spinnaker/orca/mine/tasks/RegisterCanaryTaskTest.java new file mode 100644 index 0000000000..1beb5f57f1 --- /dev/null +++ b/orca-mine/src/test/java/com/netflix/spinnaker/orca/mine/tasks/RegisterCanaryTaskTest.java @@ -0,0 +1,247 @@ +/* + * Copyright 2024 OpsMx, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package com.netflix.spinnaker.orca.mine.tasks; + +import static com.github.tomakehurst.wiremock.client.WireMock.aResponse; +import static com.github.tomakehurst.wiremock.client.WireMock.urlPathEqualTo; +import static com.github.tomakehurst.wiremock.core.WireMockConfiguration.wireMockConfig; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.github.tomakehurst.wiremock.client.WireMock; +import com.github.tomakehurst.wiremock.http.Fault; +import com.github.tomakehurst.wiremock.http.HttpHeaders; +import com.github.tomakehurst.wiremock.junit5.WireMockExtension; +import com.github.tomakehurst.wiremock.junit5.WireMockRuntimeInfo; +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler; +import com.netflix.spinnaker.orca.api.pipeline.models.StageExecution; +import com.netflix.spinnaker.orca.mine.MineService; +import com.netflix.spinnaker.orca.mine.pipeline.DeployCanaryStage; +import com.netflix.spinnaker.orca.pipeline.model.PipelineExecutionImpl; +import com.netflix.spinnaker.orca.pipeline.model.StageExecutionImpl; +import com.netflix.spinnaker.orca.retrofit.logging.RetrofitSlf4jLog; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.UUID; +import org.jetbrains.annotations.NotNull; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.RegisterExtension; +import org.springframework.http.HttpStatus; +import retrofit.RestAdapter; +import retrofit.client.OkClient; +import retrofit.client.Response; +import retrofit.converter.JacksonConverter; +import retrofit.mime.TypedString; + +public class RegisterCanaryTaskTest { + + private static MineService mineService; + + private static RegisterCanaryTask registerCanaryTask; + + private static StageExecution deployCanaryStage; + + private static ObjectMapper objectMapper = new ObjectMapper(); + + @RegisterExtension + static WireMockExtension wireMock = + WireMockExtension.newInstance().options(wireMockConfig().dynamicPort()).build(); + + @BeforeAll + static void setupOnce(WireMockRuntimeInfo wmRuntimeInfo) { + OkClient okClient = new OkClient(); + RestAdapter.LogLevel retrofitLogLevel = RestAdapter.LogLevel.NONE; + + mineService = + new RestAdapter.Builder() + .setEndpoint(wmRuntimeInfo.getHttpBaseUrl()) + .setClient(okClient) + .setLogLevel(retrofitLogLevel) + .setErrorHandler(SpinnakerRetrofitErrorHandler.getInstance()) + .setLog(new RetrofitSlf4jLog(MineService.class)) + .setConverter(new JacksonConverter(objectMapper)) + .build() + .create(MineService.class); + + registerCanaryTask = new RegisterCanaryTask(); + registerCanaryTask.setMineService(mineService); + } + + @BeforeEach + public void setup() { + var pipeline = PipelineExecutionImpl.newPipeline("foo"); + + var canaryStageId = UUID.randomUUID().toString(); + var parentStageId = UUID.randomUUID().toString(); + + deployCanaryStage = getDeployCanaryStage(pipeline, canaryStageId); + deployCanaryStage.setParentStageId(parentStageId); + var monitorCanaryStage = + new StageExecutionImpl(pipeline, "monitorCanary", Collections.emptyMap()); + + pipeline.getStages().addAll(List.of(deployCanaryStage, monitorCanaryStage)); + } + + private static void simulateFault(String url, String body, HttpStatus httpStatus) { + wireMock.givenThat( + WireMock.post(urlPathEqualTo(url)) + .willReturn( + 
aResponse() + .withHeaders(HttpHeaders.noHeaders()) + .withStatus(httpStatus.value()) + .withBody(body))); + } + + private static void simulateFault(String url, Fault fault) { + wireMock.givenThat(WireMock.post(urlPathEqualTo(url)).willReturn(aResponse().withFault(fault))); + } + + @Test + public void verifyRegisterCanaryThrowsHttpError() throws Exception { + + var url = "https://mine.service.com/registerCanary"; + + Response response = + new Response( + url, + HttpStatus.NOT_ACCEPTABLE.value(), + HttpStatus.NOT_ACCEPTABLE.getReasonPhrase(), + List.of(), + new TypedString("canaryId")); + + String errorResponseBody = objectMapper.writeValueAsString(response); + + // simulate error HTTP 406 + simulateFault("/registerCanary", errorResponseBody, HttpStatus.NOT_ACCEPTABLE); + + var canaryObject = (LinkedHashMap) deployCanaryStage.getContext().get("canary"); + canaryObject.put("application", "foo"); + + var canaryConfig = (LinkedHashMap) canaryObject.get("canaryConfig"); + canaryConfig.put("name", deployCanaryStage.getExecution().getId()); + canaryConfig.put("application", "foo"); + + // Format the canary data as per error log message + var canary = + Objects.toString(deployCanaryStage.getContext().get("canary")) + .replace("{", "[") + .replace("}", "]") + .replace("=", ":"); + + assertThatThrownBy(() -> registerCanaryTask.execute(deployCanaryStage)) + .hasMessageStartingWith( + String.format( + "Unable to register canary (executionId: %s, stageId: %s canary: %s)", + deployCanaryStage.getExecution().getId(), deployCanaryStage.getId(), canary)) + .hasMessageContaining("response: [") + .hasMessageContaining("reason:" + HttpStatus.NOT_ACCEPTABLE.getReasonPhrase()) + .hasMessageContaining("body:[bytes:Y2FuYXJ5SWQ=]") + .hasMessageContaining("errorKind:HTTP") + .hasMessageContaining("url:" + url) + .hasMessageContaining("status:" + HttpStatus.NOT_ACCEPTABLE.value()); + } + + @Test + public void verifyRegisterCanaryThrowsNetworkError() { + + // simulate network error + simulateFault("/registerCanary", Fault.CONNECTION_RESET_BY_PEER); + + var canaryObject = (LinkedHashMap) deployCanaryStage.getContext().get("canary"); + canaryObject.put("application", "foo"); + + var canaryConfig = (LinkedHashMap) canaryObject.get("canaryConfig"); + canaryConfig.put("name", deployCanaryStage.getExecution().getId()); + canaryConfig.put("application", "foo"); + + // Format the canary data as per error log message + var canary = + Objects.toString(deployCanaryStage.getContext().get("canary")) + .replace("{", "[") + .replace("}", "]") + .replace("=", ":"); + + String errorResponseBody = "[status:null, errorKind:NETWORK]"; + + assertThatThrownBy(() -> registerCanaryTask.execute(deployCanaryStage)) + .hasMessage( + String.format( + "Unable to register canary (executionId: %s, stageId: %s canary: %s), response: %s", + deployCanaryStage.getExecution().getId(), + deployCanaryStage.getId(), + canary, + errorResponseBody)); + } + + /* + * Populate Register canary stage execution test data, + * {@link LinkedHashMap} is used to maintain the insertion order , so the assertions will be much simpler + * */ + @NotNull + private static StageExecutionImpl getDeployCanaryStage( + PipelineExecutionImpl pipeline, String canaryStageId) { + return new StageExecutionImpl( + pipeline, + DeployCanaryStage.PIPELINE_CONFIG_TYPE, + new LinkedHashMap<>( + Map.of( + "canaryStageId", + canaryStageId, + "account", + "test", + "canary", + new LinkedHashMap<>( + Map.of( + "owner", + new LinkedHashMap<>( + Map.of("name", "cfieber", "email", 
"cfieber@netflix.com")), + "watchers", + Collections.emptyList(), + "canaryConfig", + new LinkedHashMap<>( + Map.of( + "lifetimeHours", + 1, + "combinedCanaryResultStrategy", + "LOWEST", + "canarySuccessCriteria", + new LinkedHashMap<>(Map.of("canaryResultScore", 95)), + "canaryHealthCheckHandler", + new LinkedHashMap<>( + Map.of( + "minimumCanaryResultScore", + 75, + "@class", + "com.netflix.spinnaker.mine.CanaryResultHealthCheckHandler")), + "canaryAnalysisConfig", + new LinkedHashMap<>( + Map.of( + "name", + "beans", + "beginCanaryAnalysisAfterMins", + 5, + "notificationHours", + List.of(1, 2), + "canaryAnalysisIntervalMins", + 15))))))))); + } +} diff --git a/orca-pipelinetemplate/orca-pipelinetemplate.gradle b/orca-pipelinetemplate/orca-pipelinetemplate.gradle index 40ae7e3d80..3f7ba0448c 100644 --- a/orca-pipelinetemplate/orca-pipelinetemplate.gradle +++ b/orca-pipelinetemplate/orca-pipelinetemplate.gradle @@ -10,7 +10,7 @@ dependencies { implementation("com.hubspot.jinjava:jinjava") implementation("com.fasterxml.jackson.dataformat:jackson-dataformat-yaml") implementation("com.fasterxml.jackson.module:jackson-module-kotlin") - implementation("com.jayway.jsonpath:json-path:2.2.0") + implementation("com.jayway.jsonpath:json-path") implementation("org.slf4j:slf4j-api") implementation("org.springframework.boot:spring-boot-autoconfigure") implementation("org.springframework:spring-context") diff --git a/orca-retrofit/orca-retrofit.gradle b/orca-retrofit/orca-retrofit.gradle index 4629990cce..1a25549c05 100644 --- a/orca-retrofit/orca-retrofit.gradle +++ b/orca-retrofit/orca-retrofit.gradle @@ -37,6 +37,8 @@ dependencies { testImplementation("org.junit.jupiter:junit-jupiter-params") testImplementation("org.mockito:mockito-core") testImplementation("org.springframework.boot:spring-boot-test") + testImplementation("com.squareup.okhttp3:mockwebserver") + testImplementation("com.squareup.retrofit2:converter-jackson") testRuntimeOnly("org.junit.jupiter:junit-jupiter-engine") testRuntimeOnly("org.springframework:spring-test") diff --git a/orca-retrofit/src/main/groovy/com/netflix/spinnaker/orca/retrofit/exceptions/BaseRetrofitExceptionHandler.groovy b/orca-retrofit/src/main/groovy/com/netflix/spinnaker/orca/retrofit/exceptions/BaseRetrofitExceptionHandler.groovy index 7c184bf8ac..25bbb52a03 100644 --- a/orca-retrofit/src/main/groovy/com/netflix/spinnaker/orca/retrofit/exceptions/BaseRetrofitExceptionHandler.groovy +++ b/orca-retrofit/src/main/groovy/com/netflix/spinnaker/orca/retrofit/exceptions/BaseRetrofitExceptionHandler.groovy @@ -24,6 +24,10 @@ import static java.net.HttpURLConnection.* abstract class BaseRetrofitExceptionHandler implements ExceptionHandler { boolean shouldRetry(Exception e, String kind, Integer responseCode) { + return shouldRetry(e, kind, null, responseCode) + } + + boolean shouldRetry(Exception e, String kind, String httpMethod, Integer responseCode) { if (isMalformedRequest(kind, e.getMessage())) { return false } @@ -33,7 +37,11 @@ abstract class BaseRetrofitExceptionHandler implements ExceptionHandler { return true } - return isIdempotentRequest(e) && (isNetworkError(kind) || isGatewayErrorCode(kind, responseCode) || isThrottle(kind, responseCode)) + if(httpMethod == null) { + httpMethod = findHttpMethodAnnotation(e) + } + + return isIdempotentRequest(httpMethod) && (isNetworkError(kind) || isGatewayErrorCode(kind, responseCode) || isThrottle(kind, responseCode)) } private boolean isGatewayErrorCode(String kind, Integer responseCode) { @@ -55,8 +63,8 @@ abstract class 
BaseRetrofitExceptionHandler implements ExceptionHandler { return "UNEXPECTED".equals(kind) && exceptionMessage?.contains("Path parameter") } - private static boolean isIdempotentRequest(Exception e) { - findHttpMethodAnnotation(e) in ["GET", "HEAD", "DELETE", "PUT"] + private static boolean isIdempotentRequest(String httpMethod) { + httpMethod in ["GET", "HEAD", "DELETE", "PUT"] } private static String findHttpMethodAnnotation(Exception exception) { diff --git a/orca-retrofit/src/main/java/com/netflix/spinnaker/orca/retrofit/exceptions/SpinnakerServerExceptionHandler.java b/orca-retrofit/src/main/java/com/netflix/spinnaker/orca/retrofit/exceptions/SpinnakerServerExceptionHandler.java index a4501a2ed0..8e5b74a598 100644 --- a/orca-retrofit/src/main/java/com/netflix/spinnaker/orca/retrofit/exceptions/SpinnakerServerExceptionHandler.java +++ b/orca-retrofit/src/main/java/com/netflix/spinnaker/orca/retrofit/exceptions/SpinnakerServerExceptionHandler.java @@ -43,6 +43,7 @@ public Response handle(String taskName, Exception exception) { String kind; Integer responseCode = null; + String httpMethod = ex.getHttpMethod(); if (ex instanceof SpinnakerNetworkException) { kind = "NETWORK"; @@ -108,6 +109,12 @@ public Response handle(String taskName, Exception exception) { responseDetails.put("kind", kind); + // http method may be null if exception is created from RetrofitError + // so only include in responseDetails when value is valid + if (httpMethod != null) { + responseDetails.put("method", httpMethod); + } + // Although Spinnaker*Exception has a retryable property that other parts of // spinnaker use, ignore it here for compatibility with // RetrofitExceptionHandler, specifically because that doesn't retry (most) @@ -116,6 +123,6 @@ public Response handle(String taskName, Exception exception) { ex.getClass().getSimpleName(), taskName, responseDetails, - shouldRetry(ex, kind, responseCode)); + shouldRetry(ex, kind, httpMethod, responseCode)); } } diff --git a/orca-retrofit/src/test/java/com/netflix/spinnaker/orca/retrofit/exceptions/SpinnakerServerExceptionHandlerTest.java b/orca-retrofit/src/test/java/com/netflix/spinnaker/orca/retrofit/exceptions/SpinnakerServerExceptionHandlerTest.java index 3d4af3c1df..978bf3c342 100644 --- a/orca-retrofit/src/test/java/com/netflix/spinnaker/orca/retrofit/exceptions/SpinnakerServerExceptionHandlerTest.java +++ b/orca-retrofit/src/test/java/com/netflix/spinnaker/orca/retrofit/exceptions/SpinnakerServerExceptionHandlerTest.java @@ -17,6 +17,7 @@ package com.netflix.spinnaker.orca.retrofit.exceptions; import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.catchThrowableOfType; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -24,6 +25,7 @@ import com.google.common.base.Throwables; import com.google.common.collect.ImmutableMap; import com.google.gson.Gson; +import com.netflix.spinnaker.kork.retrofit.ErrorHandlingExecutorCallAdapterFactory; import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException; import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerNetworkException; import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerRetrofitErrorHandler; @@ -33,10 +35,18 @@ import java.util.List; import java.util.Map; import java.util.concurrent.Callable; +import java.util.concurrent.TimeUnit; import java.util.stream.Stream; +import okhttp3.OkHttpClient; +import okhttp3.mockwebserver.MockResponse; +import 
okhttp3.mockwebserver.MockWebServer; +import okhttp3.mockwebserver.SocketPolicy; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import org.springframework.http.HttpStatus; import retrofit.RestAdapter; import retrofit.RetrofitError; import retrofit.client.Client; @@ -45,9 +55,16 @@ import retrofit.converter.GsonConverter; import retrofit.converter.JacksonConverter; import retrofit.mime.TypedString; +import retrofit2.Call; +import retrofit2.Retrofit; +import retrofit2.converter.jackson.JacksonConverterFactory; class SpinnakerServerExceptionHandlerTest { - private static final String URL = "https://some-url"; + private static Retrofit2Service retrofit2Service; + + private static final MockWebServer mockWebServer = new MockWebServer(); + + private static final String URL = mockWebServer.url("https://some-url").toString(); private static final String TASK_NAME = "task name"; @@ -135,6 +152,35 @@ class SpinnakerServerExceptionHandlerTest { SpinnakerServerExceptionHandler spinnakerServerExceptionHandler = new SpinnakerServerExceptionHandler(); + @BeforeAll + static void setupOnce() { + retrofit2Service = + new Retrofit.Builder() + .baseUrl(mockWebServer.url("/")) + .client( + new OkHttpClient.Builder() + .callTimeout(1, TimeUnit.SECONDS) + .connectTimeout(1, TimeUnit.SECONDS) + .build()) + .addCallAdapterFactory(ErrorHandlingExecutorCallAdapterFactory.getInstance()) + .addConverterFactory(JacksonConverterFactory.create()) + .build() + .create(Retrofit2Service.class); + } + + @AfterAll + static void shutdownOnce() throws Exception { + mockWebServer.shutdown(); + } + + interface Retrofit2Service { + @retrofit2.http.GET("/retrofit2") + Call getRetrofit2(); + + @retrofit2.http.POST("/retrofit2") + Call postRetrofit2(); + } + @ParameterizedTest(name = "{index} => onlyHandlesSpinnakerServerExceptionAndSubclasses {0}") @MethodSource("exceptionsForHandlesTest") void onlyHandlesSpinnakerServerExceptionAndSubclasses(Exception ex, boolean supported) { @@ -153,12 +199,71 @@ private static Stream exceptionsForHandlesTest() { } @ParameterizedTest(name = "{index} => testRetryable {0}") - @MethodSource("exceptionsForRetryTest") + @MethodSource({"exceptionsForRetryTest", "retrofit2Exceptions"}) void testRetryable(SpinnakerServerException ex, boolean expectedRetryable) { ExceptionHandler.Response response = spinnakerServerExceptionHandler.handle(TASK_NAME, ex); assertThat(response.isShouldRetry()).isEqualTo(expectedRetryable); } + private static Stream retrofit2Exceptions() { + String responseBody = "{\"test\":\"test\"}"; + + // HTTP 503 is always retryable + mockWebServer.enqueue( + new MockResponse() + .setResponseCode(HttpStatus.SERVICE_UNAVAILABLE.value()) + .setBody(responseBody)); + SpinnakerServerException retryable = + catchThrowableOfType( + () -> retrofit2Service.getRetrofit2().execute(), SpinnakerServerException.class); + + // not retryable because POST is not idempotent + mockWebServer.enqueue( + new MockResponse().setResponseCode(HttpStatus.BAD_REQUEST.value()).setBody(responseBody)); + SpinnakerServerException notRetryable = + catchThrowableOfType( + () -> retrofit2Service.postRetrofit2().execute(), SpinnakerServerException.class); + + // not retryable despite idempotent method because response code not within 429, 502, 503, 504 + mockWebServer.enqueue( + new 
MockResponse().setResponseCode(HttpStatus.UNAUTHORIZED.value()).setBody(responseBody)); + SpinnakerServerException notRetryableIdempotent = + catchThrowableOfType( + () -> retrofit2Service.getRetrofit2().execute(), SpinnakerServerException.class); + + // idempotent network errors are retryable + mockWebServer.enqueue(new MockResponse().setSocketPolicy(SocketPolicy.NO_RESPONSE)); + SpinnakerServerException idempotentNetworkException = + catchThrowableOfType( + () -> retrofit2Service.getRetrofit2().execute(), SpinnakerNetworkException.class); + + // throttling is retryable if idempotent + mockWebServer.enqueue( + new MockResponse() + .setResponseCode(HttpStatus.TOO_MANY_REQUESTS.value()) + .setBody(responseBody)); + SpinnakerServerException throttledIdempotent = + catchThrowableOfType( + () -> retrofit2Service.getRetrofit2().execute(), SpinnakerServerException.class); + + // throttling is not retryable if not idempotent + mockWebServer.enqueue( + new MockResponse() + .setResponseCode(HttpStatus.TOO_MANY_REQUESTS.value()) + .setBody(responseBody)); + SpinnakerServerException throttledNonIdempotent = + catchThrowableOfType( + () -> retrofit2Service.postRetrofit2().execute(), SpinnakerServerException.class); + + return Stream.of( + Arguments.of(retryable, true), + Arguments.of(notRetryable, false), + Arguments.of(notRetryableIdempotent, false), + Arguments.of(idempotentNetworkException, true), + Arguments.of(throttledIdempotent, true), + Arguments.of(throttledNonIdempotent, false)); + } + private static Stream exceptionsForRetryTest() throws Exception { // This isn't retryable because it's not considered an idempotent request // since there's no retrofit http method annotation in the exception's stack @@ -220,7 +325,7 @@ private static Stream exceptionsForRetryTest() throws Exception { } @ParameterizedTest(name = "{index} => verifyResponseDetails {0}") - @MethodSource("exceptionsForResponseDetailsTest") + @MethodSource({"exceptionsForResponseDetailsTest", "nonRetryableRetrofit2Exceptions"}) void verifyResponseDetails(SpinnakerHttpException spinnakerHttpException) { ExceptionHandler.Response response = spinnakerServerExceptionHandler.handle(TASK_NAME, spinnakerHttpException); @@ -229,6 +334,7 @@ void verifyResponseDetails(SpinnakerHttpException spinnakerHttpException) { // expected. This duplicates some logic in SpinnakerServerExceptionHandler, // but at least it helps detect future changes. 
Map responseBody = spinnakerHttpException.getResponseBody(); + String httpMethod = spinnakerHttpException.getHttpMethod(); String error = null; List errors = null; @@ -238,6 +344,10 @@ void verifyResponseDetails(SpinnakerHttpException spinnakerHttpException) { .put("url", spinnakerHttpException.getUrl()) .put("status", spinnakerHttpException.getResponseCode()); + if (httpMethod != null) { + builder.put("method", httpMethod); + } + if (responseBody == null) { error = spinnakerHttpException.getMessage(); errors = List.of(); @@ -285,6 +395,12 @@ private static Stream exceptionsForResponseDetailsTest() .map(SpinnakerServerExceptionHandlerTest::makeSpinnakerHttpException); } + private static Stream nonRetryableRetrofit2Exceptions() { + return retrofit2Exceptions() + .filter(args -> args.get()[1].equals(false)) + .map(args -> ((SpinnakerHttpException) args.get()[0])); + } + private void compareResponse( ExceptionHandler.Response expectedResponse, ExceptionHandler.Response actualResponse) { assertThat(actualResponse).isNotNull(); diff --git a/orca-web/orca-web.gradle b/orca-web/orca-web.gradle index 82ad37be6c..c9e6a4c1c5 100644 --- a/orca-web/orca-web.gradle +++ b/orca-web/orca-web.gradle @@ -63,6 +63,7 @@ dependencies { implementation("net.logstash.logback:logstash-logback-encoder") implementation("io.spinnaker.fiat:fiat-api:$fiatVersion") implementation("io.spinnaker.fiat:fiat-core:$fiatVersion") + implementation("io.spinnaker.kork:kork-retrofit") runtimeOnly("io.spinnaker.kork:kork-runtime") diff --git a/orca-web/src/main/groovy/com/netflix/spinnaker/orca/controllers/OperationsController.groovy b/orca-web/src/main/groovy/com/netflix/spinnaker/orca/controllers/OperationsController.groovy index c0a6cf9a64..1352c43fae 100644 --- a/orca-web/src/main/groovy/com/netflix/spinnaker/orca/controllers/OperationsController.groovy +++ b/orca-web/src/main/groovy/com/netflix/spinnaker/orca/controllers/OperationsController.groovy @@ -23,6 +23,8 @@ import com.netflix.spinnaker.fiat.shared.FiatService import com.netflix.spinnaker.fiat.shared.FiatStatus import com.netflix.spinnaker.kork.exceptions.ConfigurationException import com.netflix.spinnaker.kork.exceptions.SpinnakerException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerHttpException +import com.netflix.spinnaker.kork.retrofit.exceptions.SpinnakerServerException import com.netflix.spinnaker.orca.api.pipeline.models.PipelineExecution import com.netflix.spinnaker.orca.api.pipeline.models.Trigger import com.netflix.spinnaker.orca.clouddriver.service.JobService @@ -327,12 +329,14 @@ class OperationsController { def buildInfo try { buildInfo = buildService.getBuild(trigger.buildNumber, trigger.master, trigger.job) - } catch (RetrofitError e) { - if (e.response?.status == 404) { + } catch (SpinnakerHttpException e) { + if (e.responseCode == 404) { throw new ConfigurationException("Build ${trigger.buildNumber} of ${trigger.master}/${trigger.job} not found") } else { throw new OperationFailedException("Failed to get build ${trigger.buildNumber} of ${trigger.master}/${trigger.job}", e) } + } catch (SpinnakerServerException e){ + throw new OperationFailedException("Failed to get build ${trigger.buildNumber} of ${trigger.master}/${trigger.job}", e) } if (buildInfo?.artifacts) { if (trigger.type == "manual") { @@ -348,12 +352,14 @@ class OperationsController { trigger.master as String, trigger.job as String ) - } catch (RetrofitError e) { - if (e.response?.status == 404) { + } catch (SpinnakerHttpException e) { + if (e.responseCode == 404) { throw 
new ConfigurationException("Expected properties file " + trigger.propertyFile + " (configured on trigger), but it was missing") } else { throw new OperationFailedException("Failed to get properties file ${trigger.propertyFile}", e) } + } catch (SpinnakerServerException e){ + throw new OperationFailedException("Failed to get properties file ${trigger.propertyFile}", e) } } } diff --git a/orca-web/src/test/java/com/netflix/spinnaker/orca/controllers/OperationsControllerTest.java b/orca-web/src/test/java/com/netflix/spinnaker/orca/controllers/OperationsControllerTest.java new file mode 100644 index 0000000000..d94450575f --- /dev/null +++ b/orca-web/src/test/java/com/netflix/spinnaker/orca/controllers/OperationsControllerTest.java @@ -0,0 +1,130 @@ +/* + * Copyright 2023 Salesforce, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.netflix.spinnaker.orca.controllers; + +import static org.springframework.test.web.servlet.request.MockMvcRequestBuilders.post; +import static org.springframework.test.web.servlet.result.MockMvcResultHandlers.print; +import static org.springframework.test.web.servlet.result.MockMvcResultMatchers.status; +import static org.springframework.test.web.servlet.setup.MockMvcBuilders.webAppContextSetup; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.netflix.spinnaker.orca.Main; +import com.netflix.spinnaker.orca.notifications.NotificationClusterLock; +import com.netflix.spinnaker.orca.pipeline.persistence.ExecutionRepository; +import com.netflix.spinnaker.orca.q.pending.PendingExecutionService; +import java.nio.charset.StandardCharsets; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInfo; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.boot.test.mock.mockito.MockBean; +import org.springframework.http.MediaType; +import org.springframework.test.context.TestPropertySource; +import org.springframework.test.web.servlet.MockMvc; +import org.springframework.web.context.WebApplicationContext; + +@SpringBootTest(classes = Main.class) +@TestPropertySource( + properties = { + "spring.config.location=classpath:orca-test.yml", + "keiko.queue.redis.enabled = false" + }) +class OperationsControllerTest { + + private MockMvc webAppMockMvc; + + @Autowired private WebApplicationContext webApplicationContext; + + @Autowired ObjectMapper objectMapper; + + @MockBean ExecutionRepository executionRepository; + + @MockBean PendingExecutionService pendingExecutionService; + + @MockBean NotificationClusterLock notificationClusterLock; + + @BeforeEach + void init(TestInfo testInfo) { + System.out.println("--------------- Test " + testInfo.getDisplayName()); + webAppMockMvc = webAppContextSetup(webApplicationContext).build(); + } + + @Test + void orchestrateWithEmptyRequestBody() throws Exception { + webAppMockMvc + .perform( + 
post("/orchestrate") + .contentType(MediaType.APPLICATION_JSON_VALUE) + .characterEncoding(StandardCharsets.UTF_8.toString())) + .andDo(print()) + .andExpect(status().isBadRequest()); + } + + @Test + void orchestrateWithEmptyPipelineMap() throws Exception { + webAppMockMvc + .perform( + post("/orchestrate") + .contentType(MediaType.APPLICATION_JSON_VALUE) + .characterEncoding(StandardCharsets.UTF_8.toString()) + .content(objectMapper.writeValueAsString(Map.of()))) + .andDo(print()) + // It's a little marginal to accept an empty pipeline since it's likely + // a user error to try to run a pipeline that doesn't do anything. + .andExpect(status().isOk()); + } + + @Test + void orchestrateWithoutStages() throws Exception { + Map pipelineWithoutStages = + Map.of("application", "my-application", "name", "my-pipeline-name"); + webAppMockMvc + .perform( + post("/orchestrate") + .contentType(MediaType.APPLICATION_JSON_VALUE) + .characterEncoding(StandardCharsets.UTF_8.toString()) + .content(objectMapper.writeValueAsString(pipelineWithoutStages))) + .andDo(print()) + // It's a little marginal to accept a pipeline without stages since it's + // likely a user error to try to run a pipeline that doesn't do + // anything. + .andExpect(status().isOk()); + } + + @Test + void orchestrateTreatsMissingTypeAsBadRequest() throws Exception { + Map pipelineWithStageWithoutType = + Map.of( + "application", + "my-application", + "name", + "my-pipeline-name", + "stages", + List.of(Map.of("name", "my-pipeline-stage-name"))); + webAppMockMvc + .perform( + post("/orchestrate") + .contentType(MediaType.APPLICATION_JSON_VALUE) + .characterEncoding(StandardCharsets.UTF_8.toString()) + .content(objectMapper.writeValueAsString(pipelineWithStageWithoutType))) + .andDo(print()) + .andExpect(status().isBadRequest()); + } +} diff --git a/orca-webhook/orca-webhook.gradle b/orca-webhook/orca-webhook.gradle index e7833d7f6f..236edc029e 100644 --- a/orca-webhook/orca-webhook.gradle +++ b/orca-webhook/orca-webhook.gradle @@ -25,7 +25,7 @@ dependencies { implementation("org.springframework.boot:spring-boot-autoconfigure") compileOnly("org.projectlombok:lombok") annotationProcessor("org.projectlombok:lombok") - implementation("com.jayway.jsonpath:json-path:2.2.0") + implementation("com.jayway.jsonpath:json-path") implementation("com.squareup.okhttp3:okhttp") implementation("com.jakewharton.retrofit:retrofit1-okhttp3-client:1.1.0") testImplementation("org.springframework:spring-test")