From bc68c9c5b41d91d540ad4ba880477f96160caf4b Mon Sep 17 00:00:00 2001
From: Anoop Panicker
Date: Tue, 15 Feb 2022 12:41:45 -0800
Subject: [PATCH] move contributed modules into community repo

---
 .github/workflows/ci.yml | 4 +-
 .github/workflows/publish.yml | 15 +-
 README.md | 1 -
 awss3-storage/README.md | 0
 awss3-storage/build.gradle | 21 +
 .../dependencies.lock | 759 ++---
 .../conductor/s3}/config/S3Configuration.java | 4 +-
 .../conductor/s3}/config/S3Properties.java | 4 +-
 .../s3}/storage/S3PayloadStorage.java | 4 +-
 ...itional-spring-configuration-metadata.json | 13 +
 awssqs-event-queue/README.md | 0
 awssqs-event-queue/build.gradle | 16 +
 .../dependencies.lock | 631 ++--
 .../config/SQSEventQueueConfiguration.java | 4 +-
 .../sqs/config/SQSEventQueueProperties.java | 2 +-
 .../sqs/config/SQSEventQueueProvider.java | 8 +-
 .../sqs/eventqueue}/SQSObservableQueue.java | 6 +-
 ...itional-spring-configuration-metadata.json | 27 +
 .../DefaultEventQueueProcessorTest.java | 2 +-
 .../eventqueue}/SQSObservableQueueTest.java | 4 +-
 azureblob-storage/README.md | 44 -
 azureblob-storage/build.gradle | 8 -
 azureblob-storage/dependencies.lock | 1943 ------------
 .../config/AzureBlobConfiguration.java | 34 -
 .../azureblob/config/AzureBlobProperties.java | 123 -
 .../storage/AzureBlobPayloadStorage.java | 230 --
 .../storage/AzureBlobPayloadStorageTest.java | 158 -
 build.gradle | 6 +-
 cassandra-persistence/dependencies.lock | 36 +
 common/build.gradle | 2 +-
 contribs/build.gradle | 49 -
 ...rchivingWithTTLWorkflowStatusListener.java | 135 -
 ...rchivingWorkflowListenerConfiguration.java | 37 -
 .../ArchivingWorkflowListenerProperties.java | 67 -
 .../ArchivingWorkflowStatusListener.java | 52 -
 .../ConductorQueueStatusPublisher.java | 85 -
 ...ctorQueueStatusPublisherConfiguration.java | 39 -
 ...nductorQueueStatusPublisherProperties.java | 49 -
 .../metrics/DatadogMetricsConfiguration.java | 39 -
 .../metrics/LoggingMetricsConfiguration.java | 85 -
 .../metrics/MetricsRegistryConfiguration.java | 46 -
 .../PrometheusMetricsConfiguration.java | 46 -
 .../contribs/queue/amqp/AMQPConnection.java | 391 ---
 .../queue/amqp/AMQPObservableQueue.java | 809 -----
 .../config/AMQPEventQueueConfiguration.java | 86 -
 .../amqp/config/AMQPEventQueueProperties.java | 303 --
 .../amqp/config/AMQPEventQueueProvider.java | 59 -
 .../queue/amqp/config/AMQPRetryPattern.java | 54 -
 .../queue/amqp/util/AMQPConfigurations.java | 39 -
 .../queue/amqp/util/AMQPConstants.java | 87 -
 .../queue/amqp/util/AMQPSettings.java | 305 --
 .../queue/amqp/util/ConnectionType.java | 18 -
 .../contribs/queue/amqp/util/RetryType.java | 20 -
 .../queue/nats/NATSAbstractQueue.java | 301 --
 .../queue/nats/NATSObservableQueue.java | 120 -
 .../queue/nats/NATSStreamObservableQueue.java | 147 -
 .../queue/nats/config/NATSConfiguration.java | 32 -
 .../nats/config/NATSEventQueueProvider.java | 83 -
 .../nats/config/NATSStreamConfiguration.java | 75 -
 .../config/NATSStreamEventQueueProvider.java | 79 -
 .../nats/config/NATSStreamProperties.java | 65 -
 .../tasks/kafka/KafkaProducerManager.java | 111 -
 .../tasks/kafka/KafkaPublishTask.java | 313 --
 ...itional-spring-configuration-metadata.json | 136 -
 .../ArchivingWorkflowStatusListenerTest.java | 63 -
 .../LoggingMetricsConfigurationTest.java | 53 -
 .../PrometheusMetricsConfigurationTest.java | 78 -
 .../amqp/AMQPEventQueueProviderTest.java | 82 -
 .../queue/amqp/AMQPObservableQueueTest.java | 895 ------
 .../contribs/queue/amqp/AMQPSettingsTest.java | 89 -
 .../tasks/kafka/KafkaProducerManagerTest.java | 135 -
 .../tasks/kafka/KafkaPublishTaskTest.java | 223 --
 core/build.gradle | 8 +-
 core/dependencies.lock | 60 +
 .../config/ConductorCoreConfiguration.java | 2 +-
 .../conductor/core}/index/NoopIndexDAO.java | 2 +-
 .../index/NoopIndexDAOConfiguration.java | 2 +-
 .../core/sync/local}/LocalOnlyLock.java | 20 +-
 .../local}/LocalOnlyLockConfiguration.java | 2 +-
 .../core/sync/{ => noop}/NoopLock.java | 4 +-
 ...itional-spring-configuration-metadata.json | 37 +-
 .../core/sync/local}/LocalOnlyLockTest.java | 16 +-
 dependencies.gradle | 2 +-
 es6-persistence/dependencies.lock | 24 +-
 es7-persistence/README.md | 86 -
 es7-persistence/build.gradle | 64 -
 es7-persistence/dependencies.lock | 2671 -----------------
 .../es7/config/ElasticSearchConditions.java | 42 -
 .../es7/config/ElasticSearchProperties.java | 228 --
 .../config/ElasticSearchV7Configuration.java | 107 -
 .../dao/index/BulkRequestBuilderWrapper.java | 55 -
 .../es7/dao/index/BulkRequestWrapper.java | 51 -
 .../es7/dao/index/ElasticSearchBaseDAO.java | 90 -
 .../es7/dao/index/ElasticSearchRestDAOV7.java | 1135 -------
 .../es7/dao/query/parser/Expression.java | 118 -
 .../es7/dao/query/parser/FilterProvider.java | 26 -
 .../dao/query/parser/GroupedExpression.java | 60 -
 .../es7/dao/query/parser/NameValue.java | 139 -
 .../query/parser/internal/AbstractNode.java | 178 --
 .../dao/query/parser/internal/BooleanOp.java | 57 -
 .../query/parser/internal/ComparisonOp.java | 102 -
 .../dao/query/parser/internal/ConstValue.java | 140 -
 .../internal/FunctionThrowingException.java | 22 -
 .../dao/query/parser/internal/ListConst.java | 70 -
 .../es7/dao/query/parser/internal/Name.java | 41 -
 .../parser/internal/ParserException.java | 28 -
 .../es7/dao/query/parser/internal/Range.java | 80 -
 .../main/resources/mappings_docType_task.json | 66 -
 .../resources/mappings_docType_workflow.json | 72 -
 .../src/main/resources/template_event.json | 48 -
 .../src/main/resources/template_message.json | 28 -
 .../src/main/resources/template_task_log.json | 24 -
 .../index/ElasticSearchRestDaoBaseTest.java | 74 -
 .../es7/dao/index/ElasticSearchTest.java | 66 -
 .../index/TestBulkRequestBuilderWrapper.java | 50 -
 .../dao/index/TestElasticSearchRestDAOV7.java | 444 ---
 .../TestElasticSearchRestDAOV7Batch.java | 81 -
 .../es7/dao/query/parser/TestExpression.java | 149 -
 .../query/parser/TestGroupedExpression.java | 24 -
 .../parser/internal/AbstractParserTest.java | 27 -
 .../query/parser/internal/TestBooleanOp.java | 44 -
 .../parser/internal/TestComparisonOp.java | 44 -
 .../query/parser/internal/TestConstValue.java | 101 -
 .../dao/query/parser/internal/TestName.java | 33 -
 .../conductor/es7/utils/TestUtils.java | 64 -
 .../resources/expected_template_task_log.json | 24 -
 .../src/test/resources/task_summary.json | 17 -
 .../src/test/resources/workflow_summary.json | 12 -
 grpc-server/dependencies.lock | 16 +
 .../conductor/grpc/AbstractProtoMapper.java | 40 +-
 grpc/src/main/proto/model/taskdef.proto | 10 +-
 .../build.gradle | 13 +-
 {contribs => http-task}/dependencies.lock | 949 +-----
 .../conductor}/tasks/http/HttpTask.java | 3 +-
 .../DefaultRestTemplateProvider.java | 9 +-
 .../http/providers}/RestTemplateProvider.java | 9 +-
 ...itional-spring-configuration-metadata.json | 14 +
 .../conductor}/tasks/http/HttpTaskTest.java | 40 +-
 .../DefaultRestTemplateProviderTest.java | 8 +-
 json-jq-task/build.gradle | 21 +
 .../dependencies.lock | 376 +--
 .../tasks/json/JsonJqTransform.java | 19 +-
 .../tasks/json/JsonJqTransformTest.java | 6 +-
 mysql-persistence/build.gradle | 42 -
 .../mysql/config/MySQLConfiguration.java | 115 -
 .../mysql/config/MySQLProperties.java | 42 -
 .../conductor/mysql/dao/MySQLBaseDAO.java | 258 --
 .../mysql/dao/MySQLExecutionDAO.java | 1071 -------
 .../conductor/mysql/dao/MySQLMetadataDAO.java | 555 ----
 .../conductor/mysql/dao/MySQLQueueDAO.java | 395 ---
 .../conductor/mysql/util/ExecuteFunction.java | 26 -
 .../conductor/mysql/util/LazyToString.java | 33 -
 .../netflix/conductor/mysql/util/Query.java | 628 ----
 .../conductor/mysql/util/QueryFunction.java | 26 -
 .../mysql/util/ResultSetHandler.java | 27 -
 .../mysql/util/TransactionalFunction.java | 27 -
 .../db/migration/V1__initial_schema.sql | 172 --
 .../V2__queue_message_timestamps.sql | 2 -
 .../db/migration/V3__queue_add_priority.sql | 17 -
 .../V4__1009_Fix_MySQLExecutionDAO_Index.sql | 14 -
 .../db/migration/V5__correlation_id_index.sql | 13 -
 .../V6__new_qm_index_with_priority.sql | 13 -
 .../db/migration/V7__new_queue_message_pk.sql | 24 -
 .../resources/db/migration/V8__update_pk.sql | 103 -
 .../mysql/dao/MySQLExecutionDAOTest.java | 81 -
 .../mysql/dao/MySQLMetadataDAOTest.java | 286 --
 .../mysql/dao/MySQLQueueDAOTest.java | 385 ---
 .../src/test/resources/application.properties | 6 -
 postgres-external-storage/README.md | 24 -
 postgres-external-storage/dependencies.lock | 2090 -------------
 .../config/PostgresPayloadConfiguration.java | 84 -
 .../config/PostgresPayloadProperties.java | 134 -
 .../ExternalPostgresPayloadResource.java | 57 -
 .../storage/PostgresPayloadStorage.java | 131 -
 .../R__initial_schema.sql | 56 -
 .../ExternalPostgresPayloadResourceTest.java | 59 -
 .../storage/PostgresPayloadStorageTest.java | 128 -
 .../storage/PostgresPayloadTestUtil.java | 74 -
 postgres-persistence/build.gradle | 41 -
 .../config/PostgresConfiguration.java | 133 -
 .../postgres/config/PostgresProperties.java | 45 -
 .../postgres/dao/PostgresBaseDAO.java | 258 --
 .../postgres/dao/PostgresExecutionDAO.java | 1075 -------
 .../postgres/dao/PostgresMetadataDAO.java | 551 ----
 .../postgres/dao/PostgresQueueDAO.java | 476 ---
 .../postgres/util/ExecuteFunction.java | 26 -
 .../conductor/postgres/util/LazyToString.java | 33 -
 .../conductor/postgres/util/Query.java | 628 ----
 .../postgres/util/QueryFunction.java | 26 -
 .../postgres/util/ResultSetHandler.java | 27 -
 .../postgres/util/TransactionalFunction.java | 27 -
 .../migration_postgres/V1__initial_schema.sql | 173 --
 ...2__1009_Fix_PostgresExecutionDAO_Index.sql | 3 -
 .../V3__correlation_id_index.sql | 3 -
 .../V4__new_qm_index_with_priority.sql | 3 -
 .../V5__new_queue_message_pk.sql | 11 -
 .../db/migration_postgres/V6__update_pk.sql | 77 -
 .../V7__new_qm_index_desc_priority.sql | 3 -
 .../dao/PostgresExecutionDAOTest.java | 96 -
 .../postgres/dao/PostgresMetadataDAOTest.java | 289 --
 .../postgres/dao/PostgresQueueDAOTest.java | 409 ---
 .../postgres/performance/PerformanceTest.java | 454 ---
 .../src/test/resources/application.properties | 7 -
 redis-concurrency-limit/dependencies.lock | 36 +
 redis-lock/dependencies.lock | 36 +
 ...itional-spring-configuration-metadata.json | 9 +
 redis-persistence/dependencies.lock | 36 +
 rest/dependencies.lock | 36 +
 server/build.gradle | 15 +-
 server/dependencies.lock | 852 ++----
 settings.gradle | 12 +-
 test-harness/build.gradle | 14 +-
 test-harness/dependencies.lock | 332 +-
 .../integration/KafkaPublishTaskSpec.groovy | 178 --
 .../integration/grpc/GrpcEndToEndTest.java | 2 +-
 .../grpc/mysql/MySQLGrpcEndToEndTest.java | 47 -
 .../postgres/PostgresGrpcEndToEndTest.java | 47 -
 .../integration/http/HttpEndToEndTest.java | 2 +-
 ...orkflowStatusPublisherIntegrationTest.java | 210 --
 zookeeper-lock/build.gradle | 9 -
 .../config/ZookeeperLockConfiguration.java | 32 -
 .../zookeeper/config/ZookeeperProperties.java | 68 -
 .../zookeeper/lock/ZookeeperLock.java | 123 -
 .../zookeeper/lock/ZookeeperLockTest.java | 177 --
 224 files changed, 1687 insertions(+), 31541 deletions(-)
 create mode 100644 awss3-storage/README.md
 create mode 100644 awss3-storage/build.gradle
 rename {mysql-persistence => awss3-storage}/dependencies.lock (72%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs/storage => awss3-storage/src/main/java/com/netflix/conductor/s3}/config/S3Configuration.java (92%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs/storage => awss3-storage/src/main/java/com/netflix/conductor/s3}/config/S3Properties.java (95%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs => awss3-storage/src/main/java/com/netflix/conductor/s3}/storage/S3PayloadStorage.java (98%)
 create mode 100644 awss3-storage/src/main/resources/META-INF/additional-spring-configuration-metadata.json
 create mode 100644 awssqs-event-queue/README.md
 create mode 100644 awssqs-event-queue/build.gradle
 rename {postgres-persistence => awssqs-event-queue}/dependencies.lock (79%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs/queue => awssqs-event-queue/src/main/java/com/netflix/conductor}/sqs/config/SQSEventQueueConfiguration.java (96%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs/queue => awssqs-event-queue/src/main/java/com/netflix/conductor}/sqs/config/SQSEventQueueProperties.java (97%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs/queue => awssqs-event-queue/src/main/java/com/netflix/conductor}/sqs/config/SQSEventQueueProvider.java (92%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs => awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/eventqueue}/SQSObservableQueue.java (98%)
 create mode 100644 awssqs-event-queue/src/main/resources/META-INF/additional-spring-configuration-metadata.json
 rename {contribs/src/test/java/com/netflix/conductor/contribs/queue/sqs => awssqs-event-queue/src/test/java/com/netflix/conductor/sqs/eventqueue}/DefaultEventQueueProcessorTest.java (99%)
 rename {contribs/src/test/java/com/netflix/conductor/contribs/queue/sqs => awssqs-event-queue/src/test/java/com/netflix/conductor/sqs/eventqueue}/SQSObservableQueueTest.java (97%)
 delete mode 100644 azureblob-storage/README.md
 delete mode 100644 azureblob-storage/build.gradle
 delete mode 100644 azureblob-storage/dependencies.lock
 delete mode 100644 azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobConfiguration.java
 delete mode 100644 azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobProperties.java
 delete mode 100644 azureblob-storage/src/main/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorage.java
 delete mode 100644 azureblob-storage/src/test/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorageTest.java
 delete mode 100644 contribs/build.gradle
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWithTTLWorkflowStatusListener.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerConfiguration.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerProperties.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowStatusListener.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisher.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherConfiguration.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherProperties.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/metrics/DatadogMetricsConfiguration.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfiguration.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/metrics/MetricsRegistryConfiguration.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/metrics/PrometheusMetricsConfiguration.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPConnection.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueue.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueConfiguration.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProperties.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProvider.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPRetryPattern.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConfigurations.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConstants.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPSettings.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/ConnectionType.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/RetryType.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSAbstractQueue.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSObservableQueue.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSStreamObservableQueue.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSConfiguration.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSEventQueueProvider.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamConfiguration.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamEventQueueProvider.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamProperties.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManager.java
 delete mode 100644 contribs/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTask.java
 delete mode 100644 contribs/src/main/resources/META-INF/additional-spring-configuration-metadata.json
 delete mode 100644 contribs/src/test/java/com/netflix/conductor/contribs/listener/ArchivingWorkflowStatusListenerTest.java
 delete mode 100644 contribs/src/test/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfigurationTest.java
 delete mode 100644 contribs/src/test/java/com/netflix/conductor/contribs/metrics/PrometheusMetricsConfigurationTest.java
 delete mode 100644 contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPEventQueueProviderTest.java
 delete mode 100644 contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueueTest.java
 delete mode 100644 contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPSettingsTest.java
 delete mode 100644 contribs/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManagerTest.java
 delete mode 100644 contribs/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTaskTest.java
 rename {contribs/src/main/java/com/netflix/conductor/contribs/dao => core/src/main/java/com/netflix/conductor/core}/index/NoopIndexDAO.java (98%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs/dao => core/src/main/java/com/netflix/conductor/core}/index/NoopIndexDAOConfiguration.java (95%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs/lock => core/src/main/java/com/netflix/conductor/core/sync/local}/LocalOnlyLock.java (87%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs/lock => core/src/main/java/com/netflix/conductor/core/sync/local}/LocalOnlyLockConfiguration.java (95%)
 rename core/src/main/java/com/netflix/conductor/core/sync/{ => noop}/NoopLock.java (92%)
 rename {contribs/src/test/java/com/netflix/conductor/contribs/lock => core/src/test/java/com/netflix/conductor/core/sync/local}/LocalOnlyLockTest.java (84%)
 delete mode 100644 es7-persistence/README.md
 delete mode 100644 es7-persistence/build.gradle
 delete mode 100644 es7-persistence/dependencies.lock
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchConditions.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchProperties.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchV7Configuration.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestBuilderWrapper.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestWrapper.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchBaseDAO.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDAOV7.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/Expression.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/FilterProvider.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/GroupedExpression.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/NameValue.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractNode.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/BooleanOp.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ComparisonOp.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ConstValue.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/FunctionThrowingException.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ListConst.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Name.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ParserException.java
 delete mode 100644 es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Range.java
 delete mode 100644 es7-persistence/src/main/resources/mappings_docType_task.json
 delete mode 100644 es7-persistence/src/main/resources/mappings_docType_workflow.json
 delete mode 100644 es7-persistence/src/main/resources/template_event.json
 delete mode 100644 es7-persistence/src/main/resources/template_message.json
 delete mode 100644 es7-persistence/src/main/resources/template_task_log.json
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDaoBaseTest.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchTest.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestBulkRequestBuilderWrapper.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7Batch.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestExpression.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestGroupedExpression.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractParserTest.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestBooleanOp.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestComparisonOp.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestConstValue.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestName.java
 delete mode 100644 es7-persistence/src/test/java/com/netflix/conductor/es7/utils/TestUtils.java
 delete mode 100644 es7-persistence/src/test/resources/expected_template_task_log.json
 delete mode 100644 es7-persistence/src/test/resources/task_summary.json
 delete mode 100644 es7-persistence/src/test/resources/workflow_summary.json
 rename {postgres-external-storage => http-task}/build.gradle (71%)
 rename {contribs => http-task}/dependencies.lock (76%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs => http-task/src/main/java/com/netflix/conductor}/tasks/http/HttpTask.java (99%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs/tasks/http => http-task/src/main/java/com/netflix/conductor/tasks/http/providers}/DefaultRestTemplateProvider.java (91%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs/tasks/http => http-task/src/main/java/com/netflix/conductor/tasks/http/providers}/RestTemplateProvider.java (77%)
 create mode 100644 http-task/src/main/resources/META-INF/additional-spring-configuration-metadata.json
 rename {contribs/src/test/java/com/netflix/conductor/contribs => http-task/src/test/java/com/netflix/conductor}/tasks/http/HttpTaskTest.java (92%)
 rename {contribs/src/test/java/com/netflix/conductor/contribs/tasks/http => http-task/src/test/java/com/netflix/conductor/tasks/http/providers}/DefaultRestTemplateProviderTest.java (92%)
 create mode 100644 json-jq-task/build.gradle
 rename {zookeeper-lock => json-jq-task}/dependencies.lock (83%)
 rename {contribs/src/main/java/com/netflix/conductor/contribs => json-jq-task/src/main/java/com/netflix/conductor}/tasks/json/JsonJqTransform.java (88%)
 rename {contribs/src/test/java/com/netflix/conductor/contribs => json-jq-task/src/test/java/com/netflix/conductor}/tasks/json/JsonJqTransformTest.java (95%)
 delete mode 100644 mysql-persistence/build.gradle
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLConfiguration.java
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLProperties.java
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLBaseDAO.java
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAO.java
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAO.java
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLQueueDAO.java
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ExecuteFunction.java
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/LazyToString.java
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/Query.java
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/QueryFunction.java
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ResultSetHandler.java
 delete mode 100644 mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/TransactionalFunction.java
 delete mode 100644 mysql-persistence/src/main/resources/db/migration/V1__initial_schema.sql
 delete mode 100644 mysql-persistence/src/main/resources/db/migration/V2__queue_message_timestamps.sql
 delete mode 100644 mysql-persistence/src/main/resources/db/migration/V3__queue_add_priority.sql
 delete mode 100644 mysql-persistence/src/main/resources/db/migration/V4__1009_Fix_MySQLExecutionDAO_Index.sql
 delete mode 100644 mysql-persistence/src/main/resources/db/migration/V5__correlation_id_index.sql
 delete mode 100644 mysql-persistence/src/main/resources/db/migration/V6__new_qm_index_with_priority.sql
 delete mode 100644 mysql-persistence/src/main/resources/db/migration/V7__new_queue_message_pk.sql
 delete mode 100644 mysql-persistence/src/main/resources/db/migration/V8__update_pk.sql
 delete mode 100644 mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAOTest.java
 delete mode 100644 mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAOTest.java
 delete mode 100644 mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLQueueDAOTest.java
 delete mode 100644 mysql-persistence/src/test/resources/application.properties
 delete mode 100644 postgres-external-storage/README.md
 delete mode 100644 postgres-external-storage/dependencies.lock
 delete mode 100644 postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadConfiguration.java
 delete mode 100644 postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadProperties.java
 delete mode 100644 postgres-external-storage/src/main/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResource.java
 delete mode 100644 postgres-external-storage/src/main/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorage.java
 delete mode 100644 postgres-external-storage/src/main/resources/db/migration_external_postgres/R__initial_schema.sql
 delete mode 100644 postgres-external-storage/src/test/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResourceTest.java
 delete mode 100644 postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorageTest.java
 delete mode 100644 postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadTestUtil.java
 delete mode 100644 postgres-persistence/build.gradle
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresConfiguration.java
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresProperties.java
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresBaseDAO.java
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAO.java
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAO.java
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresQueueDAO.java
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ExecuteFunction.java
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/LazyToString.java
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/Query.java
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueryFunction.java
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ResultSetHandler.java
 delete mode 100644 postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/TransactionalFunction.java
 delete mode 100644 postgres-persistence/src/main/resources/db/migration_postgres/V1__initial_schema.sql
 delete mode 100644 postgres-persistence/src/main/resources/db/migration_postgres/V2__1009_Fix_PostgresExecutionDAO_Index.sql
 delete mode 100644 postgres-persistence/src/main/resources/db/migration_postgres/V3__correlation_id_index.sql
 delete mode 100644 postgres-persistence/src/main/resources/db/migration_postgres/V4__new_qm_index_with_priority.sql
 delete mode 100644 postgres-persistence/src/main/resources/db/migration_postgres/V5__new_queue_message_pk.sql
 delete mode 100644 postgres-persistence/src/main/resources/db/migration_postgres/V6__update_pk.sql
 delete mode 100644 postgres-persistence/src/main/resources/db/migration_postgres/V7__new_qm_index_desc_priority.sql
 delete mode 100644 postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAOTest.java
 delete mode 100644 postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAOTest.java
 delete mode 100644 postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresQueueDAOTest.java
 delete mode 100644 postgres-persistence/src/test/java/com/netflix/conductor/postgres/performance/PerformanceTest.java
 delete mode 100644 postgres-persistence/src/test/resources/application.properties
 delete mode 100644 test-harness/src/test/groovy/com/netflix/conductor/test/integration/KafkaPublishTaskSpec.groovy
 delete mode 100644 test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/mysql/MySQLGrpcEndToEndTest.java
 delete mode 100644 test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/postgres/PostgresGrpcEndToEndTest.java
 delete mode 100644 test-harness/src/test/java/com/netflix/conductor/test/listener/WorkflowStatusPublisherIntegrationTest.java
 delete mode 100644 zookeeper-lock/build.gradle
 delete mode 100644 zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperLockConfiguration.java
 delete mode 100644 zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperProperties.java
 delete mode 100644 zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/lock/ZookeeperLock.java
 delete mode 100644 zookeeper-lock/src/test/java/com/netflix/conductor/zookeeper/lock/ZookeeperLockTest.java

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 03a851e4ec..f974f195cb 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -37,12 +37,12 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
         run: |
-          ./gradlew build -x :conductor-contribs:test --scan
+          ./gradlew build --scan
       - name: Build and Publish snapshot
         if: github.event_name != 'pull_request' && github.ref == 'refs/heads/main'
         run: |
           echo "Running build for commit ${{ github.sha }}"
-          ./gradlew build -x :conductor-contribs:test snapshot --scan
+          ./gradlew build snapshot --scan
         env:
           NETFLIX_OSS_SIGNING_KEY: ${{ secrets.ORG_SIGNING_KEY }}
           NETFLIX_OSS_SIGNING_PASSWORD: ${{ secrets.ORG_SIGNING_PASSWORD }}
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index 66e70b57df..e8d5a5ca0c 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -30,7 +30,7 @@ jobs:
             ${{ runner.os }}-gradle-
       - name: Publish candidate
         if: startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '-rc.')
-        run: ./gradlew -x :conductor-contribs:test -Prelease.useLastTag=true candidate --scan
+        run: ./gradlew -Prelease.useLastTag=true candidate --scan
         env:
           NETFLIX_OSS_SONATYPE_USERNAME: ${{ secrets.ORG_SONATYPE_USERNAME }}
           NETFLIX_OSS_SONATYPE_PASSWORD: ${{ secrets.ORG_SONATYPE_PASSWORD }}
@@ -40,7 +40,7 @@
           NETFLIX_OSS_REPO_PASSWORD: ${{ secrets.ORG_NETFLIXOSS_PASSWORD }}
       - name: Publish release
         if: startsWith(github.ref, 'refs/tags/v') && (!contains(github.ref, '-rc.'))
-        run: ./gradlew -x :conductor-contribs:test -Prelease.useLastTag=true final --scan
+        run: ./gradlew -Prelease.useLastTag=true final --scan
         env:
           NETFLIX_OSS_SONATYPE_USERNAME: ${{ secrets.ORG_SONATYPE_USERNAME }}
           NETFLIX_OSS_SONATYPE_PASSWORD: ${{ secrets.ORG_SONATYPE_PASSWORD }}
@@ -48,3 +48,14 @@
           NETFLIX_OSS_SIGNING_PASSWORD: ${{ secrets.ORG_SIGNING_PASSWORD }}
           NETFLIX_OSS_REPO_USERNAME: ${{ secrets.ORG_NETFLIXOSS_USERNAME }}
           NETFLIX_OSS_REPO_PASSWORD: ${{ secrets.ORG_NETFLIXOSS_PASSWORD }}
+      - name: Publish tag to community repo
+        if: startsWith(github.ref, 'refs/tags/v')
+        run: |
+          export TAG=$(git describe --tags --abbrev=0)
+          echo "Current release version is $TAG"
+          echo "Triggering community build"
+          curl \
+            -H "Accept: application/vnd.github.v3+json" \
+            -H "Authorization: Bearer ${{ secrets.COMMUNITY_REPO_TRIGGER }}" \
+            -X POST https://api.github.com/repos/Netflix/conductor-community/dispatches \
+            -d '{"event_type": "publish_build","client_payload": {"tag":"'"$TAG"'"}}'
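[Note] The curl call above raises a GitHub repository_dispatch event against Netflix/conductor-community. A minimal sketch of a workflow that could consume that event on the community side (the file location, job, and steps are assumptions for illustration; only the event type and payload shape come from this patch):

    # .github/workflows/publish.yml in Netflix/conductor-community (hypothetical)
    name: publish
    on:
      repository_dispatch:
        types: [publish_build]   # matches the "event_type" posted by the curl call
    jobs:
      publish:
        runs-on: ubuntu-latest
        steps:
          # the tag travels in the client_payload posted above
          - run: echo "Publishing community modules for tag ${{ github.event.client_payload.tag }}"

Using repository_dispatch keeps the two repositories decoupled: this repo only needs a token (COMMUNITY_REPO_TRIGGER) with permission to post the event, and the community repo decides what to build for the given tag.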
diff --git a/README.md b/README.md
index 2f47e9082a..9e9ec7fe92 100644
--- a/README.md
+++ b/README.md
@@ -64,7 +64,6 @@ Binaries are available from [Netflix OSS Maven](https://artifacts.netflix.net/ne
 | conductor-es6-persistence | Indexing using Elasticsearch 6.X |
 | conductor-rest | Spring MVC resources for the core services |
 | conductor-ui | node.js based UI for Conductor |
-| conductor-contribs | Optional contrib package that holds extended workflow tasks and support for SQS, AMQP, etc|
 | conductor-client | Java client for Conductor that includes helpers for running worker tasks |
 | conductor-client-spring | Client starter kit for Spring |
 | conductor-server | Spring Boot Web Application |
diff --git a/awss3-storage/README.md b/awss3-storage/README.md
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/awss3-storage/build.gradle b/awss3-storage/build.gradle
new file mode 100644
index 0000000000..adda2bb426
--- /dev/null
+++ b/awss3-storage/build.gradle
@@ -0,0 +1,21 @@
+/*
+ * Copyright 2022 Netflix, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations under the License.
+ */
+
+dependencies {
+    implementation project(':conductor-common')
+    implementation project(':conductor-core')
+    compileOnly 'org.springframework.boot:spring-boot-starter'
+
+    implementation "com.amazonaws:aws-java-sdk-s3:${revAwsSdk}"
+    implementation "org.apache.commons:commons-lang3"
+}
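[Note] The new awss3-storage module packages Conductor's S3-backed external payload storage (S3Configuration, S3Properties, and S3PayloadStorage above) as a standalone artifact instead of part of conductor-contribs. A minimal sketch of consuming it from a server deployment; the artifact coordinate follows the conductor-<module> naming used elsewhere in this patch, and the property keys are assumptions based on S3Properties, not confirmed here:

    // build.gradle of a Conductor server deployment (hypothetical)
    dependencies {
        implementation 'com.netflix.conductor:conductor-awss3-storage:<version>'
    }

    # application.properties (keys assumed from S3Properties)
    conductor.external-payload-storage.type=S3
    conductor.external-payload-storage.s3.bucket-name=conductor-payloads
    conductor.external-payload-storage.s3.region=us-east-1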
"transitive": [ + "com.amazonaws:aws-java-sdk-core", + "org.apache.httpcomponents:httpclient" ] }, "jakarta.annotation:jakarta.annotation-api": { @@ -77,12 +80,27 @@ "org.springframework.boot:spring-boot-starter" ] }, - "mysql:mysql-connector-java": { - "locked": "8.0.25" + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] }, "org.apache.commons:commons-lang3": { "locked": "3.10" }, + "org.apache.httpcomponents:httpclient": { + "locked": "4.5.13", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, + "org.apache.httpcomponents:httpcore": { + "locked": "4.4.14", + "transitive": [ + "org.apache.httpcomponents:httpclient" + ] + }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ @@ -107,15 +125,6 @@ "org.apache.logging.log4j:log4j-web": { "locked": "2.17.1" }, - "org.checkerframework:checker-qual": { - "locked": "3.5.0", - "transitive": [ - "com.google.guava:guava" - ] - }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, "org.slf4j:jul-to-slf4j": { "locked": "1.7.30", "transitive": [ @@ -125,7 +134,6 @@ "org.slf4j:slf4j-api": { "locked": "1.7.30", "transitive": [ - "com.zaxxer:HikariCP", "org.apache.logging.log4j:log4j-slf4j-impl", "org.slf4j:jul-to-slf4j" ] @@ -144,12 +152,6 @@ ] }, "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework.boot:spring-boot-starter-jdbc": { "locked": "2.3.12.RELEASE" }, "org.springframework.boot:spring-boot-starter-logging": { @@ -158,9 +160,6 @@ "org.springframework.boot:spring-boot-starter" ] }, - "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE" - }, "org.springframework:spring-aop": { "locked": "5.2.15.RELEASE", "transitive": [ @@ -171,9 +170,7 @@ "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework:spring-aop", - "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx" + "org.springframework:spring-context" ] }, "org.springframework:spring-context": { @@ -187,13 +184,10 @@ "transitive": [ "org.springframework.boot:spring-boot", "org.springframework.boot:spring-boot-starter", - "org.springframework.retry:spring-retry", "org.springframework:spring-aop", "org.springframework:spring-beans", "org.springframework:spring-context", - "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx" + "org.springframework:spring-expression" ] }, "org.springframework:spring-expression": { @@ -208,26 +202,43 @@ "org.springframework:spring-core" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, "org.yaml:snakeyaml": { "locked": "1.26", "transitive": [ "org.springframework.boot:spring-boot-starter" ] + }, + "software.amazon.ion:ion-java": { + "locked": "1.0.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] } }, "runtimeClasspath": { + "com.amazonaws:aws-java-sdk-core": { + "locked": "1.11.86", + "transitive": [ + "com.amazonaws:aws-java-sdk-kms", + "com.amazonaws:aws-java-sdk-s3" + ] + }, + "com.amazonaws:aws-java-sdk-kms": { + "locked": "1.11.86", + "transitive": [ + "com.amazonaws:aws-java-sdk-s3" + ] + }, + "com.amazonaws:aws-java-sdk-s3": { + "locked": "1.11.86" + }, + 
"com.amazonaws:jmespath-java": { + "locked": "1.11.86", + "transitive": [ + "com.amazonaws:aws-java-sdk-kms", + "com.amazonaws:aws-java-sdk-s3" + ] + }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.11.4", "transitive": [ @@ -239,6 +250,7 @@ "locked": "2.11.4", "transitive": [ "com.fasterxml.jackson.core:jackson-databind", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core" ] @@ -246,49 +258,36 @@ "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.11.4", "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:jmespath-java", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core" ] }, - "com.google.code.findbugs:jsr305": { - "locked": "3.0.2", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:failureaccess": { - "locked": "1.0.1", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { + "locked": "2.11.4", "transitive": [ - "com.google.guava:guava" + "com.amazonaws:aws-java-sdk-core" ] }, - "com.google.guava:guava": { - "locked": "30.0-jre" - }, - "com.google.guava:listenablefuture": { - "locked": "9999.0-empty-to-avoid-conflict-with-guava", + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", "transitive": [ - "com.google.guava:guava" + "com.netflix.conductor:conductor-core" ] }, - "com.google.j2objc:j2objc-annotations": { - "locked": "1.3", + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", "transitive": [ - "com.google.guava:guava" + "com.github.ben-manes.caffeine:caffeine" ] }, "com.google.protobuf:protobuf-java": { "locked": "3.13.0", "transitive": [ "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "mysql:mysql-connector-java" + "com.netflix.conductor:conductor-core" ] }, "com.jayway.jsonpath:json-path": { @@ -324,10 +323,10 @@ "com.netflix.conductor:conductor-core" ] }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", + "commons-codec:commons-codec": { + "locked": "1.14", "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" + "org.apache.httpcomponents:httpclient" ] }, "commons-io:commons-io": { @@ -336,6 +335,13 @@ "com.netflix.conductor:conductor-core" ] }, + "commons-logging:commons-logging": { + "locked": "1.2", + "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "org.apache.httpcomponents:httpclient" + ] + }, "io.reactivex:rxjava": { "locked": "1.3.8", "transitive": [ @@ -349,20 +355,17 @@ "jakarta.xml.bind:jakarta.xml.bind-api" ] }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, "jakarta.xml.bind:jakarta.xml.bind-api": { "locked": "2.3.3", "transitive": [ "com.netflix.conductor:conductor-core" ] }, - "mysql:mysql-connector-java": { - "locked": "8.0.25" + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] }, "net.minidev:accessors-smart": { "locked": "2.3.1", @@ -390,6 +393,18 @@ "com.netflix.conductor:conductor-core" ] }, + "org.apache.httpcomponents:httpclient": { + "locked": "4.5.13", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, + "org.apache.httpcomponents:httpcore": { + "locked": "4.4.14", + "transitive": [ + 
"org.apache.httpcomponents:httpclient" + ] + }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ @@ -437,201 +452,81 @@ ] }, "org.checkerframework:checker-qual": { - "locked": "3.5.0", + "locked": "3.8.0", "transitive": [ - "com.google.guava:guava" + "com.github.ben-manes.caffeine:caffeine" ] }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, "org.ow2.asm:asm": { "locked": "5.0.4", "transitive": [ "net.minidev:accessors-smart" ] }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-logging" - ] - }, "org.slf4j:slf4j-api": { "locked": "1.7.30", "transitive": [ "com.jayway.jsonpath:json-path", "com.netflix.spectator:spectator-api", - "com.zaxxer:HikariCP", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j" + "org.apache.logging.log4j:log4j-slf4j-impl" ] }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", + "software.amazon.ion:ion-java": { + "locked": "1.0.1", "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx" + "com.amazonaws:aws-java-sdk-core" ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", + } + }, + "testCompileClasspath": { + "com.amazonaws:aws-java-sdk-core": { + "locked": "1.11.86", "transitive": [ - "org.springframework:spring-context" + "com.amazonaws:aws-java-sdk-kms", + "com.amazonaws:aws-java-sdk-s3" ] }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", + "com.amazonaws:aws-java-sdk-kms": { + "locked": "1.11.86", "transitive": [ - "org.springframework:spring-core" + "com.amazonaws:aws-java-sdk-s3" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] + "com.amazonaws:aws-java-sdk-s3": { + "locked": "1.11.86" }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", + "com.amazonaws:jmespath-java": { + "locked": 
"1.11.86", "transitive": [ - "org.springframework:spring-jdbc" + "com.amazonaws:aws-java-sdk-kms", + "com.amazonaws:aws-java-sdk-s3" ] }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - } - }, - "testCompileClasspath": { "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.11.4", "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.github.docker-java:docker-java-api" + "com.fasterxml.jackson.core:jackson-databind" ] }, "com.fasterxml.jackson.core:jackson-core": { "locked": "2.11.4", "transitive": [ - "com.fasterxml.jackson.core:jackson-databind" + "com.fasterxml.jackson.core:jackson-databind", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor" ] }, "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4" - }, - "com.github.docker-java:docker-java-api": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.github.docker-java:docker-java-transport": { - "locked": "3.2.8", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep" - ] - }, - "com.github.docker-java:docker-java-transport-zerodep": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.google.code.findbugs:jsr305": { - "locked": "3.0.2", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:failureaccess": { - "locked": "1.0.1", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:guava": { - "locked": "30.0-jre" - }, - "com.google.guava:listenablefuture": { - "locked": "9999.0-empty-to-avoid-conflict-with-guava", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.j2objc:j2objc-annotations": { - "locked": "1.3", + "locked": "2.11.4", "transitive": [ - "com.google.guava:guava" + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:jmespath-java", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor" ] }, - "com.google.protobuf:protobuf-java": { - "locked": "3.11.4", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { + "locked": "2.11.4", "transitive": [ - "mysql:mysql-connector-java" + "com.amazonaws:aws-java-sdk-core" ] }, "com.jayway.jsonpath:json-path": { @@ -652,10 +547,17 @@ "org.skyscreamer:jsonassert" ] }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", + "commons-codec:commons-codec": { + "locked": "1.14", "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" + "org.apache.httpcomponents:httpclient" + ] + }, + "commons-logging:commons-logging": { + "locked": "1.2", + "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "org.apache.httpcomponents:httpclient" ] }, "jakarta.activation:jakarta.activation-api": { @@ -676,16 +578,18 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, "junit:junit": { "locked": "4.13.2", "transitive": [ - "org.junit.vintage:junit-vintage-engine", - "org.testcontainers:testcontainers" + "org.junit.vintage:junit-vintage-engine" ] }, - "mysql:mysql-connector-java": { - "locked": "8.0.25" - }, "net.bytebuddy:byte-buddy": { "locked": "1.10.22", "transitive": [ @@ -698,13 +602,6 @@ "org.mockito:mockito-core" ] }, - "net.java.dev.jna:jna": { - "locked": "5.8.0", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep", - 
"org.rnorth.visible-assertions:visible-assertions" - ] - }, "net.minidev:accessors-smart": { "locked": "2.3.1", "transitive": [ @@ -717,14 +614,20 @@ "com.jayway.jsonpath:json-path" ] }, - "org.apache.commons:commons-compress": { - "locked": "1.20", + "org.apache.commons:commons-lang3": { + "locked": "3.10" + }, + "org.apache.httpcomponents:httpclient": { + "locked": "4.5.13", "transitive": [ - "org.testcontainers:testcontainers" + "com.amazonaws:aws-java-sdk-core" ] }, - "org.apache.commons:commons-lang3": { - "locked": "3.10" + "org.apache.httpcomponents:httpcore": { + "locked": "4.4.14", + "transitive": [ + "org.apache.httpcomponents:httpclient" + ] }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", @@ -773,15 +676,6 @@ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.checkerframework:checker-qual": { - "locked": "3.5.0", - "transitive": [ - "com.google.guava:guava" - ] - }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, "org.hamcrest:hamcrest": { "locked": "2.2", "transitive": [ @@ -869,18 +763,6 @@ "net.minidev:accessors-smart" ] }, - "org.rnorth.duct-tape:duct-tape": { - "locked": "1.0.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.rnorth.visible-assertions:visible-assertions": { - "locked": "2.1.2", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, "org.skyscreamer:jsonassert": { "locked": "1.5.0", "transitive": [ @@ -897,13 +779,9 @@ "org.slf4j:slf4j-api": { "locked": "1.7.30", "transitive": [ - "com.github.docker-java:docker-java-api", - "com.github.docker-java:docker-java-transport-zerodep", "com.jayway.jsonpath:json-path", - "com.zaxxer:HikariCP", "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j", - "org.testcontainers:testcontainers" + "org.slf4j:jul-to-slf4j" ] }, "org.springframework.boot:spring-boot": { @@ -925,13 +803,9 @@ "org.springframework.boot:spring-boot-starter": { "locked": "2.3.12.RELEASE", "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc", "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE" - }, "org.springframework.boot:spring-boot-starter-log4j2": { "locked": "2.3.12.RELEASE" }, @@ -957,9 +831,6 @@ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE" - }, "org.springframework:spring-aop": { "locked": "5.2.15.RELEASE", "transitive": [ @@ -970,9 +841,7 @@ "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework:spring-aop", - "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx" + "org.springframework:spring-context" ] }, "org.springframework:spring-context": { @@ -987,14 +856,11 @@ "org.springframework.boot:spring-boot", "org.springframework.boot:spring-boot-starter", "org.springframework.boot:spring-boot-starter-test", - "org.springframework.retry:spring-retry", "org.springframework:spring-aop", "org.springframework:spring-beans", "org.springframework:spring-context", "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-test", - "org.springframework:spring-tx" + "org.springframework:spring-test" ] }, "org.springframework:spring-expression": { @@ -1009,64 +875,59 @@ "org.springframework:spring-core" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, 
"org.springframework:spring-test": { "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", + "org.xmlunit:xmlunit-core": { + "locked": "2.7.0", "transitive": [ - "org.springframework:spring-jdbc" + "org.springframework.boot:spring-boot-starter-test" ] }, - "org.testcontainers:database-commons": { - "locked": "1.15.3", + "org.yaml:snakeyaml": { + "locked": "1.26", "transitive": [ - "org.testcontainers:jdbc" + "org.springframework.boot:spring-boot-starter" ] }, - "org.testcontainers:jdbc": { - "locked": "1.15.3", + "software.amazon.ion:ion-java": { + "locked": "1.0.1", "transitive": [ - "org.testcontainers:mysql" + "com.amazonaws:aws-java-sdk-core" ] - }, - "org.testcontainers:mysql": { - "locked": "1.15.3" - }, - "org.testcontainers:testcontainers": { - "locked": "1.15.3", + } + }, + "testRuntimeClasspath": { + "com.amazonaws:aws-java-sdk-core": { + "locked": "1.11.86", "transitive": [ - "org.testcontainers:database-commons" + "com.amazonaws:aws-java-sdk-kms", + "com.amazonaws:aws-java-sdk-s3" ] }, - "org.xmlunit:xmlunit-core": { - "locked": "2.7.0", + "com.amazonaws:aws-java-sdk-kms": { + "locked": "1.11.86", "transitive": [ - "org.springframework.boot:spring-boot-starter-test" + "com.amazonaws:aws-java-sdk-s3" ] }, - "org.yaml:snakeyaml": { - "locked": "1.26", + "com.amazonaws:aws-java-sdk-s3": { + "locked": "1.11.86" + }, + "com.amazonaws:jmespath-java": { + "locked": "1.11.86", "transitive": [ - "org.springframework.boot:spring-boot-starter" + "com.amazonaws:aws-java-sdk-kms", + "com.amazonaws:aws-java-sdk-s3" ] - } - }, - "testRuntimeClasspath": { + }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.11.4", "transitive": [ "com.fasterxml.jackson.core:jackson-databind", - "com.github.docker-java:docker-java-api", "com.netflix.conductor:conductor-core" ] }, @@ -1074,6 +935,7 @@ "locked": "2.11.4", "transitive": [ "com.fasterxml.jackson.core:jackson-databind", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core" ] @@ -1081,67 +943,36 @@ "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.11.4", "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:jmespath-java", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core" ] }, - "com.github.docker-java:docker-java-api": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.github.docker-java:docker-java-transport": { - "locked": "3.2.8", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep" - ] - }, - "com.github.docker-java:docker-java-transport-zerodep": { - "locked": "3.2.8", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { + "locked": "2.11.4", "transitive": [ - "org.testcontainers:testcontainers" + "com.amazonaws:aws-java-sdk-core" ] }, - "com.google.code.findbugs:jsr305": { - "locked": "3.0.2", + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", "transitive": [ - "com.google.guava:guava" + "com.netflix.conductor:conductor-core" ] }, "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:failureaccess": { - "locked": "1.0.1", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:guava": { - "locked": "30.0-jre" 
- }, - "com.google.guava:listenablefuture": { - "locked": "9999.0-empty-to-avoid-conflict-with-guava", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.j2objc:j2objc-annotations": { - "locked": "1.3", + "locked": "2.4.0", "transitive": [ - "com.google.guava:guava" + "com.github.ben-manes.caffeine:caffeine" ] }, "com.google.protobuf:protobuf-java": { "locked": "3.13.0", "transitive": [ "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "mysql:mysql-connector-java" + "com.netflix.conductor:conductor-core" ] }, "com.jayway.jsonpath:json-path": { @@ -1184,10 +1015,10 @@ "org.skyscreamer:jsonassert" ] }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", + "commons-codec:commons-codec": { + "locked": "1.14", "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" + "org.apache.httpcomponents:httpclient" ] }, "commons-io:commons-io": { @@ -1196,6 +1027,13 @@ "com.netflix.conductor:conductor-core" ] }, + "commons-logging:commons-logging": { + "locked": "1.2", + "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "org.apache.httpcomponents:httpclient" + ] + }, "io.reactivex:rxjava": { "locked": "1.3.8", "transitive": [ @@ -1222,16 +1060,18 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, "junit:junit": { "locked": "4.13.2", "transitive": [ - "org.junit.vintage:junit-vintage-engine", - "org.testcontainers:testcontainers" + "org.junit.vintage:junit-vintage-engine" ] }, - "mysql:mysql-connector-java": { - "locked": "8.0.25" - }, "net.bytebuddy:byte-buddy": { "locked": "1.10.22", "transitive": [ @@ -1244,13 +1084,6 @@ "org.mockito:mockito-core" ] }, - "net.java.dev.jna:jna": { - "locked": "5.8.0", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep", - "org.rnorth.visible-assertions:visible-assertions" - ] - }, "net.minidev:accessors-smart": { "locked": "2.3.1", "transitive": [ @@ -1270,12 +1103,6 @@ "com.netflix.conductor:conductor-core" ] }, - "org.apache.commons:commons-compress": { - "locked": "1.20", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, "org.apache.commons:commons-lang3": { "locked": "3.10", "transitive": [ @@ -1283,6 +1110,18 @@ "com.netflix.conductor:conductor-core" ] }, + "org.apache.httpcomponents:httpclient": { + "locked": "4.5.13", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, + "org.apache.httpcomponents:httpcore": { + "locked": "4.4.14", + "transitive": [ + "org.apache.httpcomponents:httpclient" + ] + }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ @@ -1350,14 +1189,11 @@ ] }, "org.checkerframework:checker-qual": { - "locked": "3.5.0", + "locked": "3.8.0", "transitive": [ - "com.google.guava:guava" + "com.github.ben-manes.caffeine:caffeine" ] }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, "org.hamcrest:hamcrest": { "locked": "2.2", "transitive": [ @@ -1455,18 +1291,6 @@ "net.minidev:accessors-smart" ] }, - "org.rnorth.duct-tape:duct-tape": { - "locked": "1.0.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.rnorth.visible-assertions:visible-assertions": { - "locked": "2.1.2", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, "org.skyscreamer:jsonassert": { "locked": "1.5.0", "transitive": [ @@ -1483,14 +1307,10 @@ "org.slf4j:slf4j-api": { "locked": "1.7.30", "transitive": [ - "com.github.docker-java:docker-java-api", - 
"com.github.docker-java:docker-java-transport-zerodep", "com.jayway.jsonpath:json-path", "com.netflix.spectator:spectator-api", - "com.zaxxer:HikariCP", "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j", - "org.testcontainers:testcontainers" + "org.slf4j:jul-to-slf4j" ] }, "org.springframework.boot:spring-boot": { @@ -1512,13 +1332,9 @@ "org.springframework.boot:spring-boot-starter": { "locked": "2.3.12.RELEASE", "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc", "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE" - }, "org.springframework.boot:spring-boot-starter-log4j2": { "locked": "2.3.12.RELEASE" }, @@ -1544,9 +1360,6 @@ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE" - }, "org.springframework:spring-aop": { "locked": "5.2.15.RELEASE", "transitive": [ @@ -1557,9 +1370,7 @@ "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework:spring-aop", - "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx" + "org.springframework:spring-context" ] }, "org.springframework:spring-context": { @@ -1574,14 +1385,11 @@ "org.springframework.boot:spring-boot", "org.springframework.boot:spring-boot-starter", "org.springframework.boot:spring-boot-starter-test", - "org.springframework.retry:spring-retry", "org.springframework:spring-aop", "org.springframework:spring-beans", "org.springframework:spring-context", "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-test", - "org.springframework:spring-tx" + "org.springframework:spring-test" ] }, "org.springframework:spring-expression": { @@ -1596,45 +1404,12 @@ "org.springframework:spring-core" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, "org.springframework:spring-test": { "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, - "org.testcontainers:database-commons": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:jdbc" - ] - }, - "org.testcontainers:jdbc": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:mysql" - ] - }, - "org.testcontainers:mysql": { - "locked": "1.15.3" - }, - "org.testcontainers:testcontainers": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:database-commons" - ] - }, "org.xmlunit:xmlunit-core": { "locked": "2.7.0", "transitive": [ @@ -1646,6 +1421,12 @@ "transitive": [ "org.springframework.boot:spring-boot-starter" ] + }, + "software.amazon.ion:ion-java": { + "locked": "1.0.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] } } } \ No newline at end of file diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/storage/config/S3Configuration.java b/awss3-storage/src/main/java/com/netflix/conductor/s3/config/S3Configuration.java similarity index 92% rename from contribs/src/main/java/com/netflix/conductor/contribs/storage/config/S3Configuration.java rename to awss3-storage/src/main/java/com/netflix/conductor/s3/config/S3Configuration.java index 8addc139df..a188c85046 100644 --- 
a/contribs/src/main/java/com/netflix/conductor/contribs/storage/config/S3Configuration.java +++ b/awss3-storage/src/main/java/com/netflix/conductor/s3/config/S3Configuration.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.storage.config; +package com.netflix.conductor.s3.config; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.boot.context.properties.EnableConfigurationProperties; @@ -18,8 +18,8 @@ import org.springframework.context.annotation.Configuration; import com.netflix.conductor.common.utils.ExternalPayloadStorage; -import com.netflix.conductor.contribs.storage.S3PayloadStorage; import com.netflix.conductor.core.utils.IDGenerator; +import com.netflix.conductor.s3.storage.S3PayloadStorage; @Configuration @EnableConfigurationProperties(S3Properties.class) diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/storage/config/S3Properties.java b/awss3-storage/src/main/java/com/netflix/conductor/s3/config/S3Properties.java similarity index 95% rename from contribs/src/main/java/com/netflix/conductor/contribs/storage/config/S3Properties.java rename to awss3-storage/src/main/java/com/netflix/conductor/s3/config/S3Properties.java index fc7f5ebf43..94a515f727 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/storage/config/S3Properties.java +++ b/awss3-storage/src/main/java/com/netflix/conductor/s3/config/S3Properties.java @@ -1,5 +1,5 @@ /* - * Copyright 2020 Netflix, Inc. + * Copyright 2022 Netflix, Inc. *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.storage.config; +package com.netflix.conductor.s3.config; import java.time.Duration; import java.time.temporal.ChronoUnit; diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/storage/S3PayloadStorage.java b/awss3-storage/src/main/java/com/netflix/conductor/s3/storage/S3PayloadStorage.java similarity index 98% rename from contribs/src/main/java/com/netflix/conductor/contribs/storage/S3PayloadStorage.java rename to awss3-storage/src/main/java/com/netflix/conductor/s3/storage/S3PayloadStorage.java index fa53f0b0bf..86e57e685d 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/storage/S3PayloadStorage.java +++ b/awss3-storage/src/main/java/com/netflix/conductor/s3/storage/S3PayloadStorage.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.storage; +package com.netflix.conductor.s3.storage; import java.io.InputStream; import java.net.URISyntaxException; @@ -22,9 +22,9 @@ import com.netflix.conductor.common.run.ExternalStorageLocation; import com.netflix.conductor.common.utils.ExternalPayloadStorage; -import com.netflix.conductor.contribs.storage.config.S3Properties; import com.netflix.conductor.core.exception.ApplicationException; import com.netflix.conductor.core.utils.IDGenerator; +import com.netflix.conductor.s3.config.S3Properties; import com.amazonaws.HttpMethod; import com.amazonaws.SdkClientException; diff --git a/awss3-storage/src/main/resources/META-INF/additional-spring-configuration-metadata.json b/awss3-storage/src/main/resources/META-INF/additional-spring-configuration-metadata.json new file mode 100644 index 0000000000..d8c1f34e5d --- /dev/null +++ b/awss3-storage/src/main/resources/META-INF/additional-spring-configuration-metadata.json @@ -0,0 +1,13 @@ +{ + "hints": [ + { + "name": "conductor.external-payload-storage.type", + "values": [ + { + "value": "s3", + "description": "Use AWS S3 as the external payload storage." 
+ } + ] + } + ] +} diff --git a/awssqs-event-queue/README.md b/awssqs-event-queue/README.md new file mode 100644 index 0000000000..e69de29bb2 diff --git a/awssqs-event-queue/build.gradle b/awssqs-event-queue/build.gradle new file mode 100644 index 0000000000..a795acf2ca --- /dev/null +++ b/awssqs-event-queue/build.gradle @@ -0,0 +1,16 @@ +dependencies { + implementation project(':conductor-common') + implementation project(':conductor-core') + compileOnly 'org.springframework.boot:spring-boot-starter' + + implementation "org.apache.commons:commons-lang3" + // SBMTODO: remove guava dep + implementation "com.google.guava:guava:${revGuava}" + + implementation "com.amazonaws:aws-java-sdk-sqs:${revAwsSdk}" + + implementation "io.reactivex:rxjava:${revRxJava}" + + testImplementation 'org.springframework.boot:spring-boot-starter' + testImplementation project(':conductor-common').sourceSets.test.output +} \ No newline at end of file diff --git a/postgres-persistence/dependencies.lock b/awssqs-event-queue/dependencies.lock similarity index 79% rename from postgres-persistence/dependencies.lock rename to awssqs-event-queue/dependencies.lock index 11890d7c0d..bd0c01d09a 100644 --- a/postgres-persistence/dependencies.lock +++ b/awssqs-event-queue/dependencies.lock @@ -5,6 +5,21 @@ } }, "compileClasspath": { + "com.amazonaws:aws-java-sdk-core": { + "locked": "1.11.86", + "transitive": [ + "com.amazonaws:aws-java-sdk-sqs" + ] + }, + "com.amazonaws:aws-java-sdk-sqs": { + "locked": "1.11.86" + }, + "com.amazonaws:jmespath-java": { + "locked": "1.11.86", + "transitive": [ + "com.amazonaws:aws-java-sdk-sqs" + ] + }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.11.4", "transitive": [ @@ -14,11 +29,23 @@ "com.fasterxml.jackson.core:jackson-core": { "locked": "2.11.4", "transitive": [ - "com.fasterxml.jackson.core:jackson-databind" + "com.fasterxml.jackson.core:jackson-databind", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor" ] }, "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4" + "locked": "2.11.4", + "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:jmespath-java", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor" + ] + }, + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { + "locked": "2.11.4", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] }, "com.google.code.findbugs:jsr305": { "locked": "3.0.2", @@ -59,21 +86,49 @@ "com.netflix.conductor:conductor-core": { "project": true }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", + "commons-codec:commons-codec": { + "locked": "1.14", "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" + "org.apache.httpcomponents:httpclient" ] }, + "commons-logging:commons-logging": { + "locked": "1.2", + "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "org.apache.httpcomponents:httpclient" + ] + }, + "io.reactivex:rxjava": { + "locked": "1.2.2" + }, "jakarta.annotation:jakarta.annotation-api": { "locked": "1.3.5", "transitive": [ "org.springframework.boot:spring-boot-starter" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, "org.apache.commons:commons-lang3": { "locked": "3.10" }, + "org.apache.httpcomponents:httpclient": { + "locked": "4.5.13", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, + "org.apache.httpcomponents:httpcore": { + "locked": "4.4.14", + "transitive": [ + "org.apache.httpcomponents:httpclient" + ] + }, "org.apache.logging.log4j:log4j-api": { 
"locked": "2.17.1", "transitive": [ @@ -104,12 +159,6 @@ "com.google.guava:guava" ] }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, - "org.postgresql:postgresql": { - "locked": "42.2.20" - }, "org.slf4j:jul-to-slf4j": { "locked": "1.7.30", "transitive": [ @@ -119,7 +168,6 @@ "org.slf4j:slf4j-api": { "locked": "1.7.30", "transitive": [ - "com.zaxxer:HikariCP", "org.apache.logging.log4j:log4j-slf4j-impl", "org.slf4j:jul-to-slf4j" ] @@ -138,12 +186,6 @@ ] }, "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework.boot:spring-boot-starter-jdbc": { "locked": "2.3.12.RELEASE" }, "org.springframework.boot:spring-boot-starter-logging": { @@ -152,9 +194,6 @@ "org.springframework.boot:spring-boot-starter" ] }, - "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE" - }, "org.springframework:spring-aop": { "locked": "5.2.15.RELEASE", "transitive": [ @@ -165,9 +204,7 @@ "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework:spring-aop", - "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx" + "org.springframework:spring-context" ] }, "org.springframework:spring-context": { @@ -181,13 +218,10 @@ "transitive": [ "org.springframework.boot:spring-boot", "org.springframework.boot:spring-boot-starter", - "org.springframework.retry:spring-retry", "org.springframework:spring-aop", "org.springframework:spring-beans", "org.springframework:spring-context", - "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx" + "org.springframework:spring-expression" ] }, "org.springframework:spring-expression": { @@ -202,26 +236,35 @@ "org.springframework:spring-core" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, "org.yaml:snakeyaml": { "locked": "1.26", "transitive": [ "org.springframework.boot:spring-boot-starter" ] + }, + "software.amazon.ion:ion-java": { + "locked": "1.0.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] } }, "runtimeClasspath": { + "com.amazonaws:aws-java-sdk-core": { + "locked": "1.11.86", + "transitive": [ + "com.amazonaws:aws-java-sdk-sqs" + ] + }, + "com.amazonaws:aws-java-sdk-sqs": { + "locked": "1.11.86" + }, + "com.amazonaws:jmespath-java": { + "locked": "1.11.86", + "transitive": [ + "com.amazonaws:aws-java-sdk-sqs" + ] + }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.11.4", "transitive": [ @@ -233,6 +276,7 @@ "locked": "2.11.4", "transitive": [ "com.fasterxml.jackson.core:jackson-databind", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core" ] @@ -240,10 +284,25 @@ "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.11.4", "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:jmespath-java", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core" ] }, + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { + "locked": "2.11.4", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, + "com.github.ben-manes.caffeine:caffeine": { + 
"locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, "com.google.code.findbugs:jsr305": { "locked": "3.0.2", "transitive": [ @@ -251,8 +310,9 @@ ] }, "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", + "locked": "2.4.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava" ] }, @@ -317,10 +377,10 @@ "com.netflix.conductor:conductor-core" ] }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", + "commons-codec:commons-codec": { + "locked": "1.14", "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" + "org.apache.httpcomponents:httpclient" ] }, "commons-io:commons-io": { @@ -329,8 +389,15 @@ "com.netflix.conductor:conductor-core" ] }, + "commons-logging:commons-logging": { + "locked": "1.2", + "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "org.apache.httpcomponents:httpclient" + ] + }, "io.reactivex:rxjava": { - "locked": "1.3.8", + "locked": "1.2.2", "transitive": [ "com.netflix.conductor:conductor-core" ] @@ -342,18 +409,18 @@ "jakarta.xml.bind:jakarta.xml.bind-api" ] }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, "jakarta.xml.bind:jakarta.xml.bind-api": { "locked": "2.3.3", "transitive": [ "com.netflix.conductor:conductor-core" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, "net.minidev:accessors-smart": { "locked": "2.3.1", "transitive": [ @@ -380,6 +447,18 @@ "com.netflix.conductor:conductor-core" ] }, + "org.apache.httpcomponents:httpclient": { + "locked": "4.5.13", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, + "org.apache.httpcomponents:httpcore": { + "locked": "4.4.14", + "transitive": [ + "org.apache.httpcomponents:httpclient" + ] + }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ @@ -427,166 +506,74 @@ ] }, "org.checkerframework:checker-qual": { - "locked": "3.5.0", + "locked": "3.8.0", "transitive": [ - "com.google.guava:guava", - "org.postgresql:postgresql" + "com.github.ben-manes.caffeine:caffeine", + "com.google.guava:guava" ] }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, "org.ow2.asm:asm": { "locked": "5.0.4", "transitive": [ "net.minidev:accessors-smart" ] }, - "org.postgresql:postgresql": { - "locked": "42.2.20" - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-logging" - ] - }, "org.slf4j:slf4j-api": { "locked": "1.7.30", "transitive": [ "com.jayway.jsonpath:json-path", "com.netflix.spectator:spectator-api", - "com.zaxxer:HikariCP", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" + "org.apache.logging.log4j:log4j-slf4j-impl" ] }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - 
"transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", + "software.amazon.ion:ion-java": { + "locked": "1.0.1", "transitive": [ - "org.springframework:spring-context" + "com.amazonaws:aws-java-sdk-core" ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", + } + }, + "testCompileClasspath": { + "com.amazonaws:aws-java-sdk-core": { + "locked": "1.11.86", "transitive": [ - "org.springframework:spring-core" + "com.amazonaws:aws-java-sdk-sqs" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] + "com.amazonaws:aws-java-sdk-sqs": { + "locked": "1.11.86" }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", + "com.amazonaws:jmespath-java": { + "locked": "1.11.86", "transitive": [ - "org.springframework:spring-jdbc" + "com.amazonaws:aws-java-sdk-sqs" ] }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - } - }, - "testCompileClasspath": { "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.11.4", "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.github.docker-java:docker-java-api" + "com.fasterxml.jackson.core:jackson-databind" ] }, "com.fasterxml.jackson.core:jackson-core": { "locked": "2.11.4", "transitive": [ - "com.fasterxml.jackson.core:jackson-databind" + "com.fasterxml.jackson.core:jackson-databind", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor" ] }, "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4" - }, - "com.github.docker-java:docker-java-api": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.github.docker-java:docker-java-transport": { - "locked": "3.2.8", + "locked": "2.11.4", "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep" + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:jmespath-java", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor" ] }, - "com.github.docker-java:docker-java-transport-zerodep": { - "locked": "3.2.8", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { + "locked": "2.11.4", "transitive": [ - "org.testcontainers:testcontainers" + "com.amazonaws:aws-java-sdk-core" ] }, "com.google.code.findbugs:jsr305": { @@ -640,12 +627,22 @@ "org.skyscreamer:jsonassert" ] }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", + "commons-codec:commons-codec": { + "locked": "1.14", "transitive": [ - 
"org.springframework.boot:spring-boot-starter-jdbc" + "org.apache.httpcomponents:httpclient" ] }, + "commons-logging:commons-logging": { + "locked": "1.2", + "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "org.apache.httpcomponents:httpclient" + ] + }, + "io.reactivex:rxjava": { + "locked": "1.2.2" + }, "jakarta.activation:jakarta.activation-api": { "locked": "1.2.2", "transitive": [ @@ -664,11 +661,16 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, "junit:junit": { "locked": "4.13.2", "transitive": [ - "org.junit.vintage:junit-vintage-engine", - "org.testcontainers:testcontainers" + "org.junit.vintage:junit-vintage-engine" ] }, "net.bytebuddy:byte-buddy": { @@ -683,13 +685,6 @@ "org.mockito:mockito-core" ] }, - "net.java.dev.jna:jna": { - "locked": "5.8.0", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep", - "org.rnorth.visible-assertions:visible-assertions" - ] - }, "net.minidev:accessors-smart": { "locked": "2.3.1", "transitive": [ @@ -702,14 +697,20 @@ "com.jayway.jsonpath:json-path" ] }, - "org.apache.commons:commons-compress": { - "locked": "1.20", + "org.apache.commons:commons-lang3": { + "locked": "3.10" + }, + "org.apache.httpcomponents:httpclient": { + "locked": "4.5.13", "transitive": [ - "org.testcontainers:testcontainers" + "com.amazonaws:aws-java-sdk-core" ] }, - "org.apache.commons:commons-lang3": { - "locked": "3.10" + "org.apache.httpcomponents:httpcore": { + "locked": "4.4.14", + "transitive": [ + "org.apache.httpcomponents:httpclient" + ] }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", @@ -764,9 +765,6 @@ "com.google.guava:guava" ] }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, "org.hamcrest:hamcrest": { "locked": "2.2", "transitive": [ @@ -854,21 +852,6 @@ "net.minidev:accessors-smart" ] }, - "org.postgresql:postgresql": { - "locked": "42.2.20" - }, - "org.rnorth.duct-tape:duct-tape": { - "locked": "1.0.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.rnorth.visible-assertions:visible-assertions": { - "locked": "2.1.2", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, "org.skyscreamer:jsonassert": { "locked": "1.5.0", "transitive": [ @@ -885,13 +868,9 @@ "org.slf4j:slf4j-api": { "locked": "1.7.30", "transitive": [ - "com.github.docker-java:docker-java-api", - "com.github.docker-java:docker-java-transport-zerodep", "com.jayway.jsonpath:json-path", - "com.zaxxer:HikariCP", "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j", - "org.testcontainers:testcontainers" + "org.slf4j:jul-to-slf4j" ] }, "org.springframework.boot:spring-boot": { @@ -913,13 +892,9 @@ "org.springframework.boot:spring-boot-starter": { "locked": "2.3.12.RELEASE", "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc", "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE" - }, "org.springframework.boot:spring-boot-starter-log4j2": { "locked": "2.3.12.RELEASE" }, @@ -945,9 +920,6 @@ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE" - }, "org.springframework:spring-aop": { "locked": "5.2.15.RELEASE", "transitive": [ @@ -958,9 +930,7 @@ "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework:spring-aop", - "org.springframework:spring-context", - 
"org.springframework:spring-jdbc", - "org.springframework:spring-tx" + "org.springframework:spring-context" ] }, "org.springframework:spring-context": { @@ -975,14 +945,11 @@ "org.springframework.boot:spring-boot", "org.springframework.boot:spring-boot-starter", "org.springframework.boot:spring-boot-starter-test", - "org.springframework.retry:spring-retry", "org.springframework:spring-aop", "org.springframework:spring-beans", "org.springframework:spring-context", "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-test", - "org.springframework:spring-tx" + "org.springframework:spring-test" ] }, "org.springframework:spring-expression": { @@ -997,45 +964,12 @@ "org.springframework:spring-core" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, "org.springframework:spring-test": { "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, - "org.testcontainers:database-commons": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:jdbc" - ] - }, - "org.testcontainers:jdbc": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:postgresql" - ] - }, - "org.testcontainers:postgresql": { - "locked": "1.15.3" - }, - "org.testcontainers:testcontainers": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:database-commons" - ] - }, "org.xmlunit:xmlunit-core": { "locked": "2.7.0", "transitive": [ @@ -1047,14 +981,34 @@ "transitive": [ "org.springframework.boot:spring-boot-starter" ] + }, + "software.amazon.ion:ion-java": { + "locked": "1.0.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] } }, "testRuntimeClasspath": { + "com.amazonaws:aws-java-sdk-core": { + "locked": "1.11.86", + "transitive": [ + "com.amazonaws:aws-java-sdk-sqs" + ] + }, + "com.amazonaws:aws-java-sdk-sqs": { + "locked": "1.11.86" + }, + "com.amazonaws:jmespath-java": { + "locked": "1.11.86", + "transitive": [ + "com.amazonaws:aws-java-sdk-sqs" + ] + }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.11.4", "transitive": [ "com.fasterxml.jackson.core:jackson-databind", - "com.github.docker-java:docker-java-api", "com.netflix.conductor:conductor-core" ] }, @@ -1062,6 +1016,7 @@ "locked": "2.11.4", "transitive": [ "com.fasterxml.jackson.core:jackson-databind", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core" ] @@ -1069,26 +1024,23 @@ "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.11.4", "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "com.amazonaws:jmespath-java", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core" ] }, - "com.github.docker-java:docker-java-api": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.github.docker-java:docker-java-transport": { - "locked": "3.2.8", + "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { + "locked": "2.11.4", "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep" + "com.amazonaws:aws-java-sdk-core" ] }, - "com.github.docker-java:docker-java-transport-zerodep": { - "locked": "3.2.8", + "com.github.ben-manes.caffeine:caffeine": { + 
"locked": "2.8.8", "transitive": [ - "org.testcontainers:testcontainers" + "com.netflix.conductor:conductor-core" ] }, "com.google.code.findbugs:jsr305": { @@ -1098,8 +1050,9 @@ ] }, "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", + "locked": "2.4.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava" ] }, @@ -1171,10 +1124,10 @@ "org.skyscreamer:jsonassert" ] }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", + "commons-codec:commons-codec": { + "locked": "1.14", "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" + "org.apache.httpcomponents:httpclient" ] }, "commons-io:commons-io": { @@ -1183,8 +1136,15 @@ "com.netflix.conductor:conductor-core" ] }, + "commons-logging:commons-logging": { + "locked": "1.2", + "transitive": [ + "com.amazonaws:aws-java-sdk-core", + "org.apache.httpcomponents:httpclient" + ] + }, "io.reactivex:rxjava": { - "locked": "1.3.8", + "locked": "1.2.2", "transitive": [ "com.netflix.conductor:conductor-core" ] @@ -1209,11 +1169,16 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "joda-time:joda-time": { + "locked": "2.8.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, "junit:junit": { "locked": "4.13.2", "transitive": [ - "org.junit.vintage:junit-vintage-engine", - "org.testcontainers:testcontainers" + "org.junit.vintage:junit-vintage-engine" ] }, "net.bytebuddy:byte-buddy": { @@ -1228,13 +1193,6 @@ "org.mockito:mockito-core" ] }, - "net.java.dev.jna:jna": { - "locked": "5.8.0", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep", - "org.rnorth.visible-assertions:visible-assertions" - ] - }, "net.minidev:accessors-smart": { "locked": "2.3.1", "transitive": [ @@ -1254,12 +1212,6 @@ "com.netflix.conductor:conductor-core" ] }, - "org.apache.commons:commons-compress": { - "locked": "1.20", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, "org.apache.commons:commons-lang3": { "locked": "3.10", "transitive": [ @@ -1267,6 +1219,18 @@ "com.netflix.conductor:conductor-core" ] }, + "org.apache.httpcomponents:httpclient": { + "locked": "4.5.13", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] + }, + "org.apache.httpcomponents:httpcore": { + "locked": "4.4.14", + "transitive": [ + "org.apache.httpcomponents:httpclient" + ] + }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ @@ -1334,15 +1298,12 @@ ] }, "org.checkerframework:checker-qual": { - "locked": "3.5.0", + "locked": "3.8.0", "transitive": [ - "com.google.guava:guava", - "org.postgresql:postgresql" + "com.github.ben-manes.caffeine:caffeine", + "com.google.guava:guava" ] }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, "org.hamcrest:hamcrest": { "locked": "2.2", "transitive": [ @@ -1440,21 +1401,6 @@ "net.minidev:accessors-smart" ] }, - "org.postgresql:postgresql": { - "locked": "42.2.20" - }, - "org.rnorth.duct-tape:duct-tape": { - "locked": "1.0.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.rnorth.visible-assertions:visible-assertions": { - "locked": "2.1.2", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, "org.skyscreamer:jsonassert": { "locked": "1.5.0", "transitive": [ @@ -1471,14 +1417,10 @@ "org.slf4j:slf4j-api": { "locked": "1.7.30", "transitive": [ - "com.github.docker-java:docker-java-api", - "com.github.docker-java:docker-java-transport-zerodep", "com.jayway.jsonpath:json-path", "com.netflix.spectator:spectator-api", - "com.zaxxer:HikariCP", 
"org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j", - "org.testcontainers:testcontainers" + "org.slf4j:jul-to-slf4j" ] }, "org.springframework.boot:spring-boot": { @@ -1500,13 +1442,9 @@ "org.springframework.boot:spring-boot-starter": { "locked": "2.3.12.RELEASE", "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc", "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE" - }, "org.springframework.boot:spring-boot-starter-log4j2": { "locked": "2.3.12.RELEASE" }, @@ -1532,9 +1470,6 @@ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE" - }, "org.springframework:spring-aop": { "locked": "5.2.15.RELEASE", "transitive": [ @@ -1545,9 +1480,7 @@ "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework:spring-aop", - "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx" + "org.springframework:spring-context" ] }, "org.springframework:spring-context": { @@ -1562,14 +1495,11 @@ "org.springframework.boot:spring-boot", "org.springframework.boot:spring-boot-starter", "org.springframework.boot:spring-boot-starter-test", - "org.springframework.retry:spring-retry", "org.springframework:spring-aop", "org.springframework:spring-beans", "org.springframework:spring-context", "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-test", - "org.springframework:spring-tx" + "org.springframework:spring-test" ] }, "org.springframework:spring-expression": { @@ -1584,45 +1514,12 @@ "org.springframework:spring-core" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, "org.springframework:spring-test": { "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, - "org.testcontainers:database-commons": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:jdbc" - ] - }, - "org.testcontainers:jdbc": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:postgresql" - ] - }, - "org.testcontainers:postgresql": { - "locked": "1.15.3" - }, - "org.testcontainers:testcontainers": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:database-commons" - ] - }, "org.xmlunit:xmlunit-core": { "locked": "2.7.0", "transitive": [ @@ -1634,6 +1531,12 @@ "transitive": [ "org.springframework.boot:spring-boot-starter" ] + }, + "software.amazon.ion:ion-java": { + "locked": "1.0.1", + "transitive": [ + "com.amazonaws:aws-java-sdk-core" + ] } } } \ No newline at end of file diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/config/SQSEventQueueConfiguration.java b/awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/config/SQSEventQueueConfiguration.java similarity index 96% rename from contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/config/SQSEventQueueConfiguration.java rename to awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/config/SQSEventQueueConfiguration.java index 001250211d..2ee425e7cd 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/config/SQSEventQueueConfiguration.java +++ 
b/awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/config/SQSEventQueueConfiguration.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.queue.sqs.config; +package com.netflix.conductor.sqs.config; import java.util.HashMap; import java.util.Map; @@ -22,11 +22,11 @@ import org.springframework.context.annotation.Bean; import org.springframework.context.annotation.Configuration; -import com.netflix.conductor.contribs.queue.sqs.SQSObservableQueue.Builder; import com.netflix.conductor.core.config.ConductorProperties; import com.netflix.conductor.core.events.EventQueueProvider; import com.netflix.conductor.core.events.queue.ObservableQueue; import com.netflix.conductor.model.TaskModel.Status; +import com.netflix.conductor.sqs.eventqueue.SQSObservableQueue.Builder; import com.amazonaws.auth.AWSCredentialsProvider; import com.amazonaws.services.sqs.AmazonSQSClient; diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/config/SQSEventQueueProperties.java b/awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/config/SQSEventQueueProperties.java similarity index 97% rename from contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/config/SQSEventQueueProperties.java rename to awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/config/SQSEventQueueProperties.java index ef22920990..7ce5a3e5d1 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/config/SQSEventQueueProperties.java +++ b/awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/config/SQSEventQueueProperties.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.queue.sqs.config; +package com.netflix.conductor.sqs.config; import java.time.Duration; import java.time.temporal.ChronoUnit; diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/config/SQSEventQueueProvider.java b/awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/config/SQSEventQueueProvider.java similarity index 92% rename from contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/config/SQSEventQueueProvider.java rename to awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/config/SQSEventQueueProvider.java index b7f5001801..5ad88dc69b 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/config/SQSEventQueueProvider.java +++ b/awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/config/SQSEventQueueProvider.java @@ -1,5 +1,5 @@ /* - * Copyright 2020 Netflix, Inc. + * Copyright 2022 Netflix, Inc. *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.queue.sqs.config; +package com.netflix.conductor.sqs.config; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; @@ -19,9 +19,9 @@ import org.slf4j.LoggerFactory; import org.springframework.lang.NonNull; -import com.netflix.conductor.contribs.queue.sqs.SQSObservableQueue.Builder; import com.netflix.conductor.core.events.EventQueueProvider; import com.netflix.conductor.core.events.queue.ObservableQueue; +import com.netflix.conductor.sqs.eventqueue.SQSObservableQueue; import com.amazonaws.services.sqs.AmazonSQSClient; import rx.Scheduler; @@ -56,7 +56,7 @@ public ObservableQueue getQueue(String queueURI) { return queues.computeIfAbsent( queueURI, q -> - new Builder() + new SQSObservableQueue.Builder() .withBatchSize(this.batchSize) .withClient(client) .withPollTimeInMS(this.pollTimeInMS) diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/SQSObservableQueue.java b/awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/eventqueue/SQSObservableQueue.java similarity index 98% rename from contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/SQSObservableQueue.java rename to awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/eventqueue/SQSObservableQueue.java index e4603791de..a65cff1d90 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/sqs/SQSObservableQueue.java +++ b/awssqs-event-queue/src/main/java/com/netflix/conductor/sqs/eventqueue/SQSObservableQueue.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package com.netflix.conductor.contribs.queue.sqs; +package com.netflix.conductor.sqs.eventqueue; import java.util.ArrayList; import java.util.Collections; @@ -52,7 +52,6 @@ import com.amazonaws.services.sqs.model.SendMessageBatchRequestEntry; import com.amazonaws.services.sqs.model.SendMessageBatchResult; import com.amazonaws.services.sqs.model.SetQueueAttributesResult; -import com.google.common.annotations.VisibleForTesting; import rx.Observable; import rx.Observable.OnSubscribe; import rx.Scheduler; @@ -238,7 +237,6 @@ public SQSObservableQueue build() { } // Private methods - @VisibleForTesting String getOrCreateQueue() { List queueUrls = listQueues(queueName); if (queueUrls == null || queueUrls.isEmpty()) { @@ -310,7 +308,6 @@ private void publishMessages(List messages) { LOGGER.debug("send result: {} for SQS queue: {}", result.getFailed().toString(), queueName); } - @VisibleForTesting List receiveMessages() { try { ReceiveMessageRequest receiveMessageRequest = @@ -339,7 +336,6 @@ List receiveMessages() { return new ArrayList<>(); } - @VisibleForTesting OnSubscribe getOnSubscribe() { return subscriber -> { Observable interval = Observable.interval(pollTimeInMS, TimeUnit.MILLISECONDS); diff --git a/awssqs-event-queue/src/main/resources/META-INF/additional-spring-configuration-metadata.json b/awssqs-event-queue/src/main/resources/META-INF/additional-spring-configuration-metadata.json new file mode 100644 index 0000000000..2cc76ff652 --- /dev/null +++ b/awssqs-event-queue/src/main/resources/META-INF/additional-spring-configuration-metadata.json @@ -0,0 +1,27 @@ +{ + "properties": [ + { + "name": "conductor.event-queues.sqs.enabled", + "type": "java.lang.Boolean", + "description": "Enable the use of AWS SQS implementation to provide queues for consuming events.", + "sourceType": "com.netflix.conductor.sqs.config.SQSEventQueueConfiguration" + }, + { + "name": "conductor.default-event-queue.type", + "type": "java.lang.String", + "description": "The default event queue type to listen on for the WAIT task.", + "sourceType": "com.netflix.conductor.sqs.config.SQSEventQueueConfiguration" + } + ], + "hints": [ + { + "name": "conductor.default-event-queue.type", + "values": [ + { + "value": "sqs", + "description": "Use AWS SQS as the event queue to listen on for the WAIT task." + } + ] + } + ] +} \ No newline at end of file diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/queue/sqs/DefaultEventQueueProcessorTest.java b/awssqs-event-queue/src/test/java/com/netflix/conductor/sqs/eventqueue/DefaultEventQueueProcessorTest.java similarity index 99% rename from contribs/src/test/java/com/netflix/conductor/contribs/queue/sqs/DefaultEventQueueProcessorTest.java rename to awssqs-event-queue/src/test/java/com/netflix/conductor/sqs/eventqueue/DefaultEventQueueProcessorTest.java index 8c0d2f7286..ab7be31186 100644 --- a/contribs/src/test/java/com/netflix/conductor/contribs/queue/sqs/DefaultEventQueueProcessorTest.java +++ b/awssqs-event-queue/src/test/java/com/netflix/conductor/sqs/eventqueue/DefaultEventQueueProcessorTest.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package com.netflix.conductor.contribs.queue.sqs; +package com.netflix.conductor.sqs.eventqueue; import java.util.HashMap; import java.util.LinkedList; diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/queue/sqs/SQSObservableQueueTest.java b/awssqs-event-queue/src/test/java/com/netflix/conductor/sqs/eventqueue/SQSObservableQueueTest.java similarity index 97% rename from contribs/src/test/java/com/netflix/conductor/contribs/queue/sqs/SQSObservableQueueTest.java rename to awssqs-event-queue/src/test/java/com/netflix/conductor/sqs/eventqueue/SQSObservableQueueTest.java index 789a90f87b..be0e92fdc5 100644 --- a/contribs/src/test/java/com/netflix/conductor/contribs/queue/sqs/SQSObservableQueueTest.java +++ b/awssqs-event-queue/src/test/java/com/netflix/conductor/sqs/eventqueue/SQSObservableQueueTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2020 Netflix, Inc. + * Copyright 2022 Netflix, Inc. *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.queue.sqs; +package com.netflix.conductor.sqs.eventqueue; import java.util.Collections; import java.util.LinkedList; diff --git a/azureblob-storage/README.md b/azureblob-storage/README.md deleted file mode 100644 index 33a39349c0..0000000000 --- a/azureblob-storage/README.md +++ /dev/null @@ -1,44 +0,0 @@ -# Azure Blob External Storage Module - -This module uses Azure Blob to store and retrieve workflow/task input/output payloads that -exceed the thresholds defined in the properties named `conductor.[workflow|task].[input|output].payload.threshold.kb`. - -**Warning** The Azure Java SDK uses libraries already present in `conductor`, such as `jackson` and `netty`. -You may encounter deprecation issues or conflicts, and may need to adapt the code if the module is not maintained along with `conductor`. -It has only been tested with **v12.2.0**. - -## Configuration - -### Usage - -See the [External Payload Storage](https://netflix.github.io/conductor/externalpayloadstorage/#azure-blob-storage) documentation. - -### Example - -```properties -conductor.additional.modules=com.netflix.conductor.azureblob.AzureBlobModule -es.set.netty.runtime.available.processors=false -

-workflow.external.payload.storage=AZURE_BLOB -workflow.external.payload.storage.azure_blob.connection_string=DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;EndpointSuffix=localhost -workflow.external.payload.storage.azure_blob.signedurlexpirationseconds=360 -``` - -## Testing - -You can use [Azurite](https://github.com/Azure/Azurite) to simulate Azure Storage. - -### Troubleshooting - -* When using **es5 persistence** you will receive a `java.lang.IllegalStateException` because the Netty library calls `setAvailableProcessors` twice. To resolve this issue, you need to set the following system property - -``` -es.set.netty.runtime.available.processors=false -``` -

-If you want to change the default HTTP client of the Azure SDK, you can use `okhttp` instead of `netty`. -For that, you need to add the following [dependency](https://github.com/Azure/azure-sdk-for-java/tree/master/sdk/storage/azure-storage-blob#default-http-client).
- -``` -com.azure:azure-core-http-okhttp:${compatible version} -``` diff --git a/azureblob-storage/build.gradle b/azureblob-storage/build.gradle deleted file mode 100644 index 3b85566a1c..0000000000 --- a/azureblob-storage/build.gradle +++ /dev/null @@ -1,8 +0,0 @@ -dependencies { - implementation project(':conductor-common') - implementation project(':conductor-core') - compileOnly 'org.springframework.boot:spring-boot-starter' - - implementation "com.azure:azure-storage-blob:${revAzureStorageBlobSdk}" - implementation "org.apache.commons:commons-lang3" -} diff --git a/azureblob-storage/dependencies.lock b/azureblob-storage/dependencies.lock deleted file mode 100644 index b123f7ed5c..0000000000 --- a/azureblob-storage/dependencies.lock +++ /dev/null @@ -1,1943 +0,0 @@ -{ - "annotationProcessor": { - "org.springframework.boot:spring-boot-configuration-processor": { - "locked": "2.3.12.RELEASE" - } - }, - "compileClasspath": { - "com.azure:azure-core": { - "locked": "1.5.1", - "transitive": [ - "com.azure:azure-core-http-netty", - "com.azure:azure-storage-blob", - "com.azure:azure-storage-common" - ] - }, - "com.azure:azure-core-http-netty": { - "locked": "1.5.2", - "transitive": [ - "com.azure:azure-storage-common" - ] - }, - "com.azure:azure-storage-blob": { - "locked": "12.7.0" - }, - "com.azure:azure-storage-common": { - "locked": "12.7.0", - "transitive": [ - "com.azure:azure-storage-blob" - ] - }, - "com.fasterxml.jackson.core:jackson-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations" - ] - }, - "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml": { - "locked": "2.11.4", - "transitive": [ - "com.azure:azure-core" - ] - }, - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310": { - "locked": "2.11.4", - "transitive": [ - "com.azure:azure-core" - ] - }, - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml" - ] - }, - "com.fasterxml.woodstox:woodstox-core": { - "locked": "6.2.3", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "io.netty:netty-buffer": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-transport", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - 
"io.netty:netty-codec": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy" - ] - }, - "io.netty:netty-codec-http": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec-http2", - "io.netty:netty-handler-proxy", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-codec-http2": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-codec-socks": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-handler-proxy" - ] - }, - "io.netty:netty-common": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-buffer", - "io.netty:netty-codec", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-resolver", - "io.netty:netty-transport", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - "io.netty:netty-handler": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-handler-proxy": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-resolver": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-handler", - "io.netty:netty-transport" - ] - }, - "io.netty:netty-tcnative-boringssl-static": { - "locked": "2.0.39.Final", - "transitive": [ - "com.azure:azure-core" - ] - }, - "io.netty:netty-transport": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-codec", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - "io.netty:netty-transport-native-epoll": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-transport-native-unix-common": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-transport-native-epoll" - ] - }, - "io.projectreactor.netty:reactor-netty": { - "locked": "0.9.20.RELEASE", - "transitive": [ - "com.azure:azure-core-http-netty" - ] - }, - "io.projectreactor:reactor-core": { - "locked": "3.3.17.RELEASE", - "transitive": [ - "com.azure:azure-core", - "io.projectreactor.netty:reactor-netty" - ] - }, - "jakarta.activation:jakarta.activation-api": { - "locked": "1.2.2", - "transitive": [ - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "jakarta.xml.bind:jakarta.xml.bind-api" - ] - }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "jakarta.xml.bind:jakarta.xml.bind-api": { - "locked": "2.3.3", - "transitive": [ - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10" - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - 
"org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1" - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1" - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1" - }, - "org.codehaus.woodstox:stax2-api": { - "locked": "4.2.1", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.woodstox:woodstox-core" - ] - }, - "org.reactivestreams:reactive-streams": { - "locked": "1.0.3", - "transitive": [ - "io.projectreactor:reactor-core" - ] - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "com.azure:azure-core", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - } - }, - "runtimeClasspath": { - "com.azure:azure-core": { - "locked": "1.5.1", - "transitive": [ - "com.azure:azure-core-http-netty", - "com.azure:azure-storage-blob", - "com.azure:azure-storage-common" - ] - }, - "com.azure:azure-core-http-netty": { - "locked": "1.5.2", - "transitive": [ - "com.azure:azure-storage-common" - ] - }, - "com.azure:azure-storage-blob": { - "locked": "12.7.0" - }, - "com.azure:azure-storage-common": { - "locked": "12.7.0", - "transitive": [ - "com.azure:azure-storage-blob" - ] - }, - "com.fasterxml.jackson.core:jackson-annotations": { - "locked": "2.11.4", - 
"transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "com.netflix.conductor:conductor-core" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml": { - "locked": "2.11.4", - "transitive": [ - "com.azure:azure-core" - ] - }, - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310": { - "locked": "2.11.4", - "transitive": [ - "com.azure:azure-core" - ] - }, - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml" - ] - }, - "com.fasterxml.woodstox:woodstox-core": { - "locked": "6.2.3", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml" - ] - }, - "com.google.protobuf:protobuf-java": { - "locked": "3.13.0", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "com.jayway.jsonpath:json-path": { - "locked": "2.4.0", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.netflix.conductor:conductor-annotations": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-common" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.spectator:spectator-api": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.spotify:completable-futures": { - "locked": "0.3.3", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "commons-io:commons-io": { - "locked": "2.7", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "io.netty:netty-buffer": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-transport", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - "io.netty:netty-codec": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy" - ] - }, - "io.netty:netty-codec-http": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec-http2", - 
"io.netty:netty-handler-proxy", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-codec-http2": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-codec-socks": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-handler-proxy" - ] - }, - "io.netty:netty-common": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-buffer", - "io.netty:netty-codec", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-resolver", - "io.netty:netty-transport", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - "io.netty:netty-handler": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-handler-proxy": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-resolver": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-handler", - "io.netty:netty-transport" - ] - }, - "io.netty:netty-tcnative-boringssl-static": { - "locked": "2.0.39.Final", - "transitive": [ - "com.azure:azure-core" - ] - }, - "io.netty:netty-transport": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-codec", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - "io.netty:netty-transport-native-epoll": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-transport-native-unix-common": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-transport-native-epoll" - ] - }, - "io.projectreactor.netty:reactor-netty": { - "locked": "0.9.20.RELEASE", - "transitive": [ - "com.azure:azure-core-http-netty" - ] - }, - "io.projectreactor:reactor-core": { - "locked": "3.3.17.RELEASE", - "transitive": [ - "com.azure:azure-core", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.reactivex:rxjava": { - "locked": "1.3.8", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "jakarta.activation:jakarta.activation-api": { - "locked": "1.2.2", - "transitive": [ - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "com.netflix.conductor:conductor-core", - "jakarta.xml.bind:jakarta.xml.bind-api" - ] - }, - "jakarta.xml.bind:jakarta.xml.bind-api": { - "locked": "2.3.3", - "transitive": [ - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "com.netflix.conductor:conductor-core" - ] - }, - "net.minidev:accessors-smart": { - "locked": "2.3.1", - "transitive": [ - "net.minidev:json-smart" - ] - }, - "net.minidev:json-smart": { - "locked": "2.3.1", - "transitive": [ - "com.jayway.jsonpath:json-path" - ] - }, - "org.apache.bval:bval-jsr": { - "locked": "2.0.5", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10", - "transitive": [ - 
"com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.codehaus.woodstox:stax2-api": { - "locked": "4.2.1", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.woodstox:woodstox-core" - ] - }, - "org.ow2.asm:asm": { - "locked": "5.0.4", - "transitive": [ - "net.minidev:accessors-smart" - ] - }, - "org.reactivestreams:reactive-streams": { - "locked": "1.0.3", - "transitive": [ - "io.projectreactor:reactor-core" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "com.azure:azure-core", - "com.jayway.jsonpath:json-path", - "com.netflix.spectator:spectator-api", - "org.apache.logging.log4j:log4j-slf4j-impl" - ] - } - }, - "testCompileClasspath": { - "com.azure:azure-core": { - "locked": "1.5.1", - "transitive": [ - "com.azure:azure-core-http-netty", - "com.azure:azure-storage-blob", - "com.azure:azure-storage-common" - ] - }, - "com.azure:azure-core-http-netty": { - "locked": "1.5.2", - "transitive": [ - "com.azure:azure-storage-common" - ] - }, - "com.azure:azure-storage-blob": { - "locked": "12.7.0" - }, - "com.azure:azure-storage-common": { - "locked": "12.7.0", - "transitive": [ - "com.azure:azure-storage-blob" - ] - }, - "com.fasterxml.jackson.core:jackson-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations" - ] - }, - "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - 
"com.fasterxml.jackson.module:jackson-module-jaxb-annotations" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml": { - "locked": "2.11.4", - "transitive": [ - "com.azure:azure-core" - ] - }, - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310": { - "locked": "2.11.4", - "transitive": [ - "com.azure:azure-core" - ] - }, - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml" - ] - }, - "com.fasterxml.woodstox:woodstox-core": { - "locked": "6.2.3", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml" - ] - }, - "com.jayway.jsonpath:json-path": { - "locked": "2.4.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.vaadin.external.google:android-json": { - "locked": "0.0.20131108.vaadin1", - "transitive": [ - "org.skyscreamer:jsonassert" - ] - }, - "io.netty:netty-buffer": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-transport", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - "io.netty:netty-codec": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy" - ] - }, - "io.netty:netty-codec-http": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec-http2", - "io.netty:netty-handler-proxy", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-codec-http2": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-codec-socks": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-handler-proxy" - ] - }, - "io.netty:netty-common": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-buffer", - "io.netty:netty-codec", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-resolver", - "io.netty:netty-transport", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - "io.netty:netty-handler": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-handler-proxy": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-resolver": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-handler", - "io.netty:netty-transport" - ] - }, - "io.netty:netty-tcnative-boringssl-static": { - "locked": "2.0.39.Final", - "transitive": [ - "com.azure:azure-core" - ] - }, - "io.netty:netty-transport": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-codec", - "io.netty:netty-codec-http", - 
"io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - "io.netty:netty-transport-native-epoll": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-transport-native-unix-common": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-transport-native-epoll" - ] - }, - "io.projectreactor.netty:reactor-netty": { - "locked": "0.9.20.RELEASE", - "transitive": [ - "com.azure:azure-core-http-netty" - ] - }, - "io.projectreactor:reactor-core": { - "locked": "3.3.17.RELEASE", - "transitive": [ - "com.azure:azure-core", - "io.projectreactor.netty:reactor-netty" - ] - }, - "jakarta.activation:jakarta.activation-api": { - "locked": "1.2.2", - "transitive": [ - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "jakarta.xml.bind:jakarta.xml.bind-api" - ] - }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "jakarta.xml.bind:jakarta.xml.bind-api": { - "locked": "2.3.3", - "transitive": [ - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "junit:junit": { - "locked": "4.13.2", - "transitive": [ - "org.junit.vintage:junit-vintage-engine" - ] - }, - "net.bytebuddy:byte-buddy": { - "locked": "1.10.22", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "net.bytebuddy:byte-buddy-agent": { - "locked": "1.10.22", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "net.minidev:accessors-smart": { - "locked": "2.3.1", - "transitive": [ - "net.minidev:json-smart" - ] - }, - "net.minidev:json-smart": { - "locked": "2.3.1", - "transitive": [ - "com.jayway.jsonpath:json-path" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10" - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - "org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "org.apache.logging.log4j:log4j-web", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1" - }, - "org.apiguardian:apiguardian-api": { - "locked": "1.1.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.assertj:assertj-core": { - "locked": "3.16.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.codehaus.woodstox:stax2-api": { - "locked": "4.2.1", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.woodstox:woodstox-core" - ] - }, - "org.hamcrest:hamcrest": { 
- "locked": "2.2", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit.jupiter:junit-jupiter": { - "locked": "5.6.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit.jupiter:junit-jupiter-api": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-params" - ] - }, - "org.junit.jupiter:junit-jupiter-params": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter" - ] - }, - "org.junit.platform:junit-platform-commons": { - "locked": "1.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.platform:junit-platform-engine" - ] - }, - "org.junit.platform:junit-platform-engine": { - "locked": "1.6.3", - "transitive": [ - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.junit.vintage:junit-vintage-engine": { - "locked": "5.6.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit:junit-bom": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.mockito:mockito-core": { - "locked": "3.3.3", - "transitive": [ - "org.mockito:mockito-junit-jupiter", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.mockito:mockito-junit-jupiter": { - "locked": "3.3.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.objenesis:objenesis": { - "locked": "2.6", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "org.opentest4j:opentest4j": { - "locked": "1.2.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.platform:junit-platform-engine" - ] - }, - "org.ow2.asm:asm": { - "locked": "5.0.4", - "transitive": [ - "net.minidev:accessors-smart" - ] - }, - "org.reactivestreams:reactive-streams": { - "locked": "1.0.3", - "transitive": [ - "io.projectreactor:reactor-core" - ] - }, - "org.skyscreamer:jsonassert": { - "locked": "1.5.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2", - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "com.azure:azure-core", - "com.jayway.jsonpath:json-path", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework.boot:spring-boot-starter-log4j2": { - "locked": "2.3.12.RELEASE" - }, - 
"org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter-test": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-test": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-test-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-starter-test", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression", - "org.springframework:spring-test" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.springframework:spring-test": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.xmlunit:xmlunit-core": { - "locked": "2.7.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - } - }, - "testRuntimeClasspath": { - "com.azure:azure-core": { - "locked": "1.5.1", - "transitive": [ - "com.azure:azure-core-http-netty", - "com.azure:azure-storage-blob", - "com.azure:azure-storage-common" - ] - }, - "com.azure:azure-core-http-netty": { - "locked": "1.5.2", - "transitive": [ - "com.azure:azure-storage-common" - ] - }, - "com.azure:azure-storage-blob": { - "locked": "12.7.0" - }, - "com.azure:azure-storage-common": { - "locked": "12.7.0", - "transitive": [ - "com.azure:azure-storage-blob" - ] - }, - "com.fasterxml.jackson.core:jackson-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "com.netflix.conductor:conductor-core" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - 
"com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml": { - "locked": "2.11.4", - "transitive": [ - "com.azure:azure-core" - ] - }, - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310": { - "locked": "2.11.4", - "transitive": [ - "com.azure:azure-core" - ] - }, - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml" - ] - }, - "com.fasterxml.woodstox:woodstox-core": { - "locked": "6.2.3", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml" - ] - }, - "com.google.protobuf:protobuf-java": { - "locked": "3.13.0", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "com.jayway.jsonpath:json-path": { - "locked": "2.4.0", - "transitive": [ - "com.netflix.conductor:conductor-core", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "com.netflix.conductor:conductor-annotations": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-common" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.spectator:spectator-api": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.spotify:completable-futures": { - "locked": "0.3.3", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.vaadin.external.google:android-json": { - "locked": "0.0.20131108.vaadin1", - "transitive": [ - "org.skyscreamer:jsonassert" - ] - }, - "commons-io:commons-io": { - "locked": "2.7", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "io.netty:netty-buffer": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-transport", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - "io.netty:netty-codec": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy" - ] - }, - "io.netty:netty-codec-http": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec-http2", - "io.netty:netty-handler-proxy", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-codec-http2": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-codec-socks": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-handler-proxy" - ] - }, - "io.netty:netty-common": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-buffer", - "io.netty:netty-codec", - "io.netty:netty-codec-http", - 
"io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-resolver", - "io.netty:netty-transport", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - "io.netty:netty-handler": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-handler-proxy": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-resolver": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-handler", - "io.netty:netty-transport" - ] - }, - "io.netty:netty-tcnative-boringssl-static": { - "locked": "2.0.39.Final", - "transitive": [ - "com.azure:azure-core" - ] - }, - "io.netty:netty-transport": { - "locked": "4.1.65.Final", - "transitive": [ - "io.netty:netty-codec", - "io.netty:netty-codec-http", - "io.netty:netty-codec-http2", - "io.netty:netty-codec-socks", - "io.netty:netty-handler", - "io.netty:netty-handler-proxy", - "io.netty:netty-transport-native-epoll", - "io.netty:netty-transport-native-unix-common" - ] - }, - "io.netty:netty-transport-native-epoll": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.netty:netty-transport-native-unix-common": { - "locked": "4.1.65.Final", - "transitive": [ - "com.azure:azure-core-http-netty", - "io.netty:netty-transport-native-epoll" - ] - }, - "io.projectreactor.netty:reactor-netty": { - "locked": "0.9.20.RELEASE", - "transitive": [ - "com.azure:azure-core-http-netty" - ] - }, - "io.projectreactor:reactor-core": { - "locked": "3.3.17.RELEASE", - "transitive": [ - "com.azure:azure-core", - "io.projectreactor.netty:reactor-netty" - ] - }, - "io.reactivex:rxjava": { - "locked": "1.3.8", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "jakarta.activation:jakarta.activation-api": { - "locked": "1.2.2", - "transitive": [ - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "com.netflix.conductor:conductor-core", - "jakarta.xml.bind:jakarta.xml.bind-api" - ] - }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "jakarta.xml.bind:jakarta.xml.bind-api": { - "locked": "2.3.3", - "transitive": [ - "com.fasterxml.jackson.module:jackson-module-jaxb-annotations", - "com.netflix.conductor:conductor-core", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "junit:junit": { - "locked": "4.13.2", - "transitive": [ - "org.junit.vintage:junit-vintage-engine" - ] - }, - "net.bytebuddy:byte-buddy": { - "locked": "1.10.22", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "net.bytebuddy:byte-buddy-agent": { - "locked": "1.10.22", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "net.minidev:accessors-smart": { - "locked": "2.3.1", - "transitive": [ - "net.minidev:json-smart" - ] - }, - "net.minidev:json-smart": { - "locked": "2.3.1", - "transitive": [ - "com.jayway.jsonpath:json-path" - ] - }, - "org.apache.bval:bval-jsr": { - "locked": "2.0.5", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10", - "transitive": [ - 
"com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apiguardian:apiguardian-api": { - "locked": "1.1.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.assertj:assertj-core": { - "locked": "3.16.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.codehaus.woodstox:stax2-api": { - "locked": "4.2.1", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-xml", - "com.fasterxml.woodstox:woodstox-core" - ] - }, - "org.hamcrest:hamcrest": { - "locked": "2.2", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit.jupiter:junit-jupiter": { - "locked": "5.6.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit.jupiter:junit-jupiter-api": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.jupiter:junit-jupiter-params", - "org.mockito:mockito-junit-jupiter" - ] - }, - "org.junit.jupiter:junit-jupiter-engine": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter" - ] - }, - "org.junit.jupiter:junit-jupiter-params": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter" - ] - }, - "org.junit.platform:junit-platform-commons": { - "locked": "1.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.platform:junit-platform-engine" - ] - }, - "org.junit.platform:junit-platform-engine": { - "locked": "1.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.junit.vintage:junit-vintage-engine": { - "locked": "5.6.3", - 
"transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit:junit-bom": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.mockito:mockito-core": { - "locked": "3.3.3", - "transitive": [ - "org.mockito:mockito-junit-jupiter", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.mockito:mockito-junit-jupiter": { - "locked": "3.3.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.objenesis:objenesis": { - "locked": "2.6", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "org.opentest4j:opentest4j": { - "locked": "1.2.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.platform:junit-platform-engine" - ] - }, - "org.ow2.asm:asm": { - "locked": "5.0.4", - "transitive": [ - "net.minidev:accessors-smart" - ] - }, - "org.reactivestreams:reactive-streams": { - "locked": "1.0.3", - "transitive": [ - "io.projectreactor:reactor-core" - ] - }, - "org.skyscreamer:jsonassert": { - "locked": "1.5.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2", - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "com.azure:azure-core", - "com.jayway.jsonpath:json-path", - "com.netflix.spectator:spectator-api", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework.boot:spring-boot-starter-log4j2": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter-test": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-test": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-test-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - 
"org.springframework:spring-aop", - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-starter-test", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression", - "org.springframework:spring-test" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.springframework:spring-test": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.xmlunit:xmlunit-core": { - "locked": "2.7.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - } - } -} \ No newline at end of file diff --git a/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobConfiguration.java b/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobConfiguration.java deleted file mode 100644 index b5160484cd..0000000000 --- a/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobConfiguration.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.azureblob.config; - -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import com.netflix.conductor.azureblob.storage.AzureBlobPayloadStorage; -import com.netflix.conductor.common.utils.ExternalPayloadStorage; -import com.netflix.conductor.core.utils.IDGenerator; - -@Configuration(proxyBeanMethods = false) -@EnableConfigurationProperties(AzureBlobProperties.class) -@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "azureblob") -public class AzureBlobConfiguration { - - @Bean - public ExternalPayloadStorage azureBlobExternalPayloadStorage( - IDGenerator idGenerator, AzureBlobProperties properties) { - return new AzureBlobPayloadStorage(idGenerator, properties); - } -} diff --git a/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobProperties.java b/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobProperties.java deleted file mode 100644 index 9a1f4fbf96..0000000000 --- a/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/config/AzureBlobProperties.java +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.azureblob.config; - -import java.time.Duration; -import java.time.temporal.ChronoUnit; - -import org.springframework.boot.context.properties.ConfigurationProperties; -import org.springframework.boot.convert.DurationUnit; - -@ConfigurationProperties("conductor.external-payload-storage.azureblob") -public class AzureBlobProperties { - - /** The connection string to be used to connect to Azure Blob storage */ - private String connectionString = null; - - /** The name of the container where the payloads will be stored */ - private String containerName = "conductor-payloads"; - - /** The endpoint to be used to connect to Azure Blob storage */ - private String endpoint = null; - - /** The sas token to be used for authenticating requests */ - private String sasToken = null; - - /** The time for which the shared access signature is valid */ - @DurationUnit(ChronoUnit.SECONDS) - private Duration signedUrlExpirationDuration = Duration.ofSeconds(5); - - /** The path at which the workflow inputs will be stored */ - private String workflowInputPath = "workflow/input/"; - - /** The path at which the workflow outputs will be stored */ - private String workflowOutputPath = "workflow/output/"; - - /** The path at which the task inputs will be stored */ - private String taskInputPath = "task/input/"; - - /** The path at which the task outputs will be stored */ - private String taskOutputPath = "task/output/"; - - public String getConnectionString() { - return connectionString; - } - - public void setConnectionString(String connectionString) { - this.connectionString = connectionString; - } - - public String getContainerName() { - return containerName; - } - - public void setContainerName(String containerName) { - this.containerName = containerName; - } - - public String getEndpoint() { - return endpoint; - } - - public void setEndpoint(String endpoint) { - this.endpoint = endpoint; - } - - public String getSasToken() { - return sasToken; - } - - public void setSasToken(String sasToken) { - this.sasToken = sasToken; - } - - public Duration getSignedUrlExpirationDuration() { - return signedUrlExpirationDuration; - } - - public void setSignedUrlExpirationDuration(Duration signedUrlExpirationDuration) { - this.signedUrlExpirationDuration = signedUrlExpirationDuration; - } - - public String getWorkflowInputPath() { - return workflowInputPath; - } - - public void setWorkflowInputPath(String workflowInputPath) { - this.workflowInputPath = workflowInputPath; - } - - public String getWorkflowOutputPath() { - return workflowOutputPath; - } - - public void setWorkflowOutputPath(String workflowOutputPath) { - this.workflowOutputPath = workflowOutputPath; - } - - public String getTaskInputPath() { - return taskInputPath; - } - - public void setTaskInputPath(String taskInputPath) { - this.taskInputPath = taskInputPath; - } - - public String getTaskOutputPath() { - return taskOutputPath; - } - - public void setTaskOutputPath(String taskOutputPath) { - this.taskOutputPath = taskOutputPath; - } -} diff --git a/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorage.java 
b/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorage.java deleted file mode 100644 index 2a05b8e4d1..0000000000 --- a/azureblob-storage/src/main/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorage.java +++ /dev/null @@ -1,230 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.azureblob.storage; - -import java.io.ByteArrayInputStream; -import java.io.ByteArrayOutputStream; -import java.io.InputStream; -import java.io.UncheckedIOException; -import java.time.OffsetDateTime; -import java.time.ZoneOffset; - -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.azureblob.config.AzureBlobProperties; -import com.netflix.conductor.common.run.ExternalStorageLocation; -import com.netflix.conductor.common.utils.ExternalPayloadStorage; -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.core.utils.IDGenerator; - -import com.azure.core.exception.UnexpectedLengthException; -import com.azure.core.util.Context; -import com.azure.storage.blob.BlobContainerClient; -import com.azure.storage.blob.BlobContainerClientBuilder; -import com.azure.storage.blob.models.BlobHttpHeaders; -import com.azure.storage.blob.models.BlobStorageException; -import com.azure.storage.blob.sas.BlobSasPermission; -import com.azure.storage.blob.sas.BlobServiceSasSignatureValues; -import com.azure.storage.blob.specialized.BlockBlobClient; -import com.azure.storage.common.Utility; -import com.azure.storage.common.implementation.credentials.SasTokenCredential; - -/** - * An implementation of {@link ExternalPayloadStorage} using Azure Blob for storing large JSON - * payload data. - * - * @see Azure Java SDK - */ -public class AzureBlobPayloadStorage implements ExternalPayloadStorage { - - private static final Logger LOGGER = LoggerFactory.getLogger(AzureBlobPayloadStorage.class); - private static final String CONTENT_TYPE = "application/json"; - - private final IDGenerator idGenerator; - private final String workflowInputPath; - private final String workflowOutputPath; - private final String taskInputPath; - private final String taskOutputPath; - - private final BlobContainerClient blobContainerClient; - private final long expirationSec; - private final SasTokenCredential sasTokenCredential; - - public AzureBlobPayloadStorage(IDGenerator idGenerator, AzureBlobProperties properties) { - this.idGenerator = idGenerator; - workflowInputPath = properties.getWorkflowInputPath(); - workflowOutputPath = properties.getWorkflowOutputPath(); - taskInputPath = properties.getTaskInputPath(); - taskOutputPath = properties.getTaskOutputPath(); - expirationSec = properties.getSignedUrlExpirationDuration().getSeconds(); - String connectionString = properties.getConnectionString(); - String containerName = properties.getContainerName(); - String endpoint = properties.getEndpoint(); - String sasToken = properties.getSasToken(); - - BlobContainerClientBuilder blobContainerClientBuilder = new BlobContainerClientBuilder(); - if (connectionString != null) { - blobContainerClientBuilder.connectionString(connectionString); - sasTokenCredential = null; - } else if (endpoint != null) { - blobContainerClientBuilder.endpoint(endpoint); - if (sasToken != null) { - sasTokenCredential = SasTokenCredential.fromSasTokenString(sasToken); - blobContainerClientBuilder.sasToken(sasTokenCredential.getSasToken()); - } else { - sasTokenCredential = null; - } - } else { - String 
msg = "Missing property for connectionString OR endpoint"; - LOGGER.error(msg); - throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, msg); - } - blobContainerClient = blobContainerClientBuilder.containerName(containerName).buildClient(); - } - - /** - * @param operation the type of {@link Operation} to be performed - * @param payloadType the {@link PayloadType} that is being accessed - * @return a {@link ExternalStorageLocation} object which contains the pre-signed URL and the - * azure blob name for the json payload - */ - @Override - public ExternalStorageLocation getLocation( - Operation operation, PayloadType payloadType, String path) { - try { - ExternalStorageLocation externalStorageLocation = new ExternalStorageLocation(); - - String objectKey; - if (StringUtils.isNotBlank(path)) { - objectKey = path; - } else { - objectKey = getObjectKey(payloadType); - } - externalStorageLocation.setPath(objectKey); - - BlockBlobClient blockBlobClient = - blobContainerClient.getBlobClient(objectKey).getBlockBlobClient(); - String blobUrl = Utility.urlDecode(blockBlobClient.getBlobUrl()); - - if (sasTokenCredential != null) { - blobUrl = blobUrl + "?" + sasTokenCredential.getSasToken(); - } else { - BlobSasPermission blobSASPermission = new BlobSasPermission(); - if (operation.equals(Operation.READ)) { - blobSASPermission.setReadPermission(true); - } else if (operation.equals(Operation.WRITE)) { - blobSASPermission.setWritePermission(true); - blobSASPermission.setCreatePermission(true); - } - BlobServiceSasSignatureValues blobServiceSasSignatureValues = - new BlobServiceSasSignatureValues( - OffsetDateTime.now(ZoneOffset.UTC).plusSeconds(expirationSec), - blobSASPermission); - blobUrl = - blobUrl + "?" + blockBlobClient.generateSas(blobServiceSasSignatureValues); - } - - externalStorageLocation.setUri(blobUrl); - return externalStorageLocation; - } catch (BlobStorageException e) { - String msg = "Error communicating with Azure"; - LOGGER.error(msg, e); - throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, msg, e); - } - } - - /** - * Uploads the payload to the given azure blob name. It is expected that the caller retrieves - * the blob name using {@link #getLocation(Operation, PayloadType, String)} before making this - * call. - * - * @param path the name of the blob to be uploaded - * @param payload an {@link InputStream} containing the json payload which is to be uploaded - * @param payloadSize the size of the json payload in bytes - */ - @Override - public void upload(String path, InputStream payload, long payloadSize) { - try { - BlockBlobClient blockBlobClient = - blobContainerClient.getBlobClient(path).getBlockBlobClient(); - BlobHttpHeaders blobHttpHeaders = new BlobHttpHeaders().setContentType(CONTENT_TYPE); - blockBlobClient.uploadWithResponse( - payload, - payloadSize, - blobHttpHeaders, - null, - null, - null, - null, - null, - Context.NONE); - } catch (BlobStorageException | UncheckedIOException | UnexpectedLengthException e) { - String msg = "Error communicating with Azure"; - LOGGER.error(msg, e); - throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, msg, e); - } - } - - /** - * Downloads the payload stored in an azure blob. - * - * @param path the path of the blob - * @return an input stream containing the contents of the object Caller is expected to close the - * input stream. 
- */ - @Override - public InputStream download(String path) { - try { - BlockBlobClient blockBlobClient = - blobContainerClient.getBlobClient(path).getBlockBlobClient(); - ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); - // Avoid another call to the api to get the blob size - // ByteArrayOutputStream outputStream = new - // ByteArrayOutputStream(blockBlobClient.getProperties().value().blobSize()); - blockBlobClient.download(outputStream); - return new ByteArrayInputStream(outputStream.toByteArray()); - } catch (BlobStorageException | UncheckedIOException | NullPointerException e) { - String msg = "Error communicating with Azure"; - LOGGER.error(msg, e); - throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, msg, e); - } - } - - /** - * Build path on external storage. Copied from S3PayloadStorage. - * - * @param payloadType the {@link PayloadType} which will determine the base path of the object - * @return External Storage path - */ - private String getObjectKey(PayloadType payloadType) { - StringBuilder stringBuilder = new StringBuilder(); - switch (payloadType) { - case WORKFLOW_INPUT: - stringBuilder.append(workflowInputPath); - break; - case WORKFLOW_OUTPUT: - stringBuilder.append(workflowOutputPath); - break; - case TASK_INPUT: - stringBuilder.append(taskInputPath); - break; - case TASK_OUTPUT: - stringBuilder.append(taskOutputPath); - break; - } - stringBuilder.append(idGenerator.generate()).append(".json"); - return stringBuilder.toString(); - } -} diff --git a/azureblob-storage/src/test/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorageTest.java b/azureblob-storage/src/test/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorageTest.java deleted file mode 100644 index 6fa2091590..0000000000 --- a/azureblob-storage/src/test/java/com/netflix/conductor/azureblob/storage/AzureBlobPayloadStorageTest.java +++ /dev/null @@ -1,158 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
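The storage class deleted above follows a three-step contract: resolve a blob location, upload against the returned path, then download by that same path. The following caller-side sketch is not part of the patch; the class name, the storage instance, and the example payload are illustrative assumptions, with only the ExternalPayloadStorage method signatures taken from the code above.

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;

    import com.netflix.conductor.common.run.ExternalStorageLocation;
    import com.netflix.conductor.common.utils.ExternalPayloadStorage;

    public class PayloadStorageRoundTripSketch {

        static void roundTrip(ExternalPayloadStorage storage) throws Exception {
            // 1. Resolve a blob name (and pre-signed URL) for a workflow input payload.
            ExternalStorageLocation location =
                    storage.getLocation(
                            ExternalPayloadStorage.Operation.WRITE,
                            ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT,
                            null);

            // 2. Upload the JSON payload against the path returned above.
            byte[] json = "{\"input\":\"value\"}".getBytes(StandardCharsets.UTF_8);
            storage.upload(location.getPath(), new ByteArrayInputStream(json), json.length);

            // 3. Download by the same path; closing the stream is the caller's job.
            try (InputStream downloaded = storage.download(location.getPath())) {
                System.out.println("downloaded " + downloaded.readAllBytes().length + " bytes");
            }
        }
    }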
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.azureblob.storage; - -import java.time.Duration; - -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; - -import com.netflix.conductor.azureblob.config.AzureBlobProperties; -import com.netflix.conductor.common.run.ExternalStorageLocation; -import com.netflix.conductor.common.utils.ExternalPayloadStorage; -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.core.utils.IDGenerator; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class AzureBlobPayloadStorageTest { - - private AzureBlobProperties properties; - - private IDGenerator idGenerator; - - @Before - public void setUp() { - properties = mock(AzureBlobProperties.class); - idGenerator = new IDGenerator(); - when(properties.getConnectionString()).thenReturn(null); - when(properties.getContainerName()).thenReturn("conductor-payloads"); - when(properties.getEndpoint()).thenReturn(null); - when(properties.getSasToken()).thenReturn(null); - when(properties.getSignedUrlExpirationDuration()).thenReturn(Duration.ofSeconds(5)); - when(properties.getWorkflowInputPath()).thenReturn("workflow/input/"); - when(properties.getWorkflowOutputPath()).thenReturn("workflow/output/"); - when(properties.getTaskInputPath()).thenReturn("task/input"); - when(properties.getTaskOutputPath()).thenReturn("task/output/"); - } - - /** Dummy credentials Azure SDK doesn't work with Azurite since it cleans parameters */ - private final String azuriteConnectionString = - "DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://127.0.0.1:10000/devstoreaccount1;EndpointSuffix=localhost"; - - @Rule public ExpectedException expectedException = ExpectedException.none(); - - @Test - public void testNoStorageAccount() { - expectedException.expect(ApplicationException.class); - new AzureBlobPayloadStorage(idGenerator, properties); - } - - @Test - public void testUseConnectionString() { - when(properties.getConnectionString()).thenReturn(azuriteConnectionString); - new AzureBlobPayloadStorage(idGenerator, properties); - } - - @Test - public void testUseEndpoint() { - String azuriteEndpoint = "http://127.0.0.1:10000/"; - when(properties.getEndpoint()).thenReturn(azuriteEndpoint); - new AzureBlobPayloadStorage(idGenerator, properties); - } - - @Test - public void testGetLocationFixedPath() { - when(properties.getConnectionString()).thenReturn(azuriteConnectionString); - AzureBlobPayloadStorage azureBlobPayloadStorage = - new AzureBlobPayloadStorage(idGenerator, properties); - String path = "somewhere"; - ExternalStorageLocation externalStorageLocation = - azureBlobPayloadStorage.getLocation( - ExternalPayloadStorage.Operation.READ, - ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT, - path); - assertNotNull(externalStorageLocation); - assertEquals(path, externalStorageLocation.getPath()); - 
assertNotNull(externalStorageLocation.getUri()); - } - - private void testGetLocation( - AzureBlobPayloadStorage azureBlobPayloadStorage, - ExternalPayloadStorage.Operation operation, - ExternalPayloadStorage.PayloadType payloadType, - String expectedPath) { - ExternalStorageLocation externalStorageLocation = - azureBlobPayloadStorage.getLocation(operation, payloadType, null); - assertNotNull(externalStorageLocation); - assertNotNull(externalStorageLocation.getPath()); - assertTrue(externalStorageLocation.getPath().startsWith(expectedPath)); - assertNotNull(externalStorageLocation.getUri()); - assertTrue(externalStorageLocation.getUri().contains(expectedPath)); - } - - @Test - public void testGetAllLocations() { - when(properties.getConnectionString()).thenReturn(azuriteConnectionString); - AzureBlobPayloadStorage azureBlobPayloadStorage = - new AzureBlobPayloadStorage(idGenerator, properties); - - testGetLocation( - azureBlobPayloadStorage, - ExternalPayloadStorage.Operation.READ, - ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT, - properties.getWorkflowInputPath()); - testGetLocation( - azureBlobPayloadStorage, - ExternalPayloadStorage.Operation.READ, - ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT, - properties.getWorkflowOutputPath()); - testGetLocation( - azureBlobPayloadStorage, - ExternalPayloadStorage.Operation.READ, - ExternalPayloadStorage.PayloadType.TASK_INPUT, - properties.getTaskInputPath()); - testGetLocation( - azureBlobPayloadStorage, - ExternalPayloadStorage.Operation.READ, - ExternalPayloadStorage.PayloadType.TASK_OUTPUT, - properties.getTaskOutputPath()); - - testGetLocation( - azureBlobPayloadStorage, - ExternalPayloadStorage.Operation.WRITE, - ExternalPayloadStorage.PayloadType.WORKFLOW_INPUT, - properties.getWorkflowInputPath()); - testGetLocation( - azureBlobPayloadStorage, - ExternalPayloadStorage.Operation.WRITE, - ExternalPayloadStorage.PayloadType.WORKFLOW_OUTPUT, - properties.getWorkflowOutputPath()); - testGetLocation( - azureBlobPayloadStorage, - ExternalPayloadStorage.Operation.WRITE, - ExternalPayloadStorage.PayloadType.TASK_INPUT, - properties.getTaskInputPath()); - testGetLocation( - azureBlobPayloadStorage, - ExternalPayloadStorage.Operation.WRITE, - ExternalPayloadStorage.PayloadType.TASK_OUTPUT, - properties.getTaskOutputPath()); - } -} diff --git a/build.gradle b/build.gradle index ffb66ca11a..3314676e1c 100644 --- a/build.gradle +++ b/build.gradle @@ -155,9 +155,9 @@ allprojects { jacocoTestReport { reports { - html.enabled = true - xml.enabled = true - csv.enabled = false + html.required = true + xml.required = true + csv.required = false } } diff --git a/cassandra-persistence/dependencies.lock b/cassandra-persistence/dependencies.lock index c4de7b73d7..9cd49a6e1b 100644 --- a/cassandra-persistence/dependencies.lock +++ b/cassandra-persistence/dependencies.lock @@ -299,6 +299,12 @@ "com.netflix.conductor:conductor-core" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, "com.github.jnr:jffi": { "locked": "1.2.16", "transitive": [ @@ -330,6 +336,12 @@ "com.github.jnr:jnr-ffi" ] }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "com.google.guava:guava": { "locked": "19.0", "transitive": [ @@ -523,6 +535,12 @@ "com.netflix.conductor:conductor-core" ] }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + 
"com.github.ben-manes.caffeine:caffeine" + ] + }, "org.ow2.asm:asm": { "locked": "5.0.4", "transitive": [ @@ -1402,6 +1420,12 @@ "com.netflix.conductor:conductor-core" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, "com.github.docker-java:docker-java-api": { "locked": "3.2.8", "transitive": [ @@ -1451,6 +1475,12 @@ "com.github.jnr:jnr-ffi" ] }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "com.google.guava:guava": { "locked": "19.0", "transitive": [ @@ -1762,6 +1792,12 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "org.codehaus.groovy:groovy": { "locked": "2.5.14", "transitive": [ diff --git a/common/build.gradle b/common/build.gradle index 0d2aab4e0e..87d4212ea4 100644 --- a/common/build.gradle +++ b/common/build.gradle @@ -38,7 +38,7 @@ dependencies { task protogen(dependsOn: jar, type: JavaExec) { classpath configurations.annotationsProcessorCodegen - main = 'com.netflix.conductor.annotationsprocessor.protogen.ProtoGenTask' + mainClass = "com.netflix.conductor.annotationsprocessor.protogen.ProtoGenTask" args( "conductor.proto", "com.netflix.conductor.proto", diff --git a/contribs/build.gradle b/contribs/build.gradle deleted file mode 100644 index 93ea5b8e6c..0000000000 --- a/contribs/build.gradle +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2021 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -dependencies { - implementation project(':conductor-common') - implementation project(':conductor-core') - compileOnly 'org.springframework.boot:spring-boot-starter' - compileOnly 'org.springframework.boot:spring-boot-starter-web' - - implementation "com.amazonaws:aws-java-sdk-s3:${revAwsSdk}" - implementation "com.amazonaws:aws-java-sdk-sqs:${revAwsSdk}" - - implementation "org.apache.commons:commons-lang3:" - - implementation "net.thisptr:jackson-jq:${revJq}" - // SBMTODO: remove guava dep - implementation "com.google.guava:guava:${revGuava}" - - implementation "javax.ws.rs:jsr311-api:${revJsr311Api}" - - implementation "org.apache.kafka:kafka-clients:${revKafka}" - - implementation "com.rabbitmq:amqp-client:${revAmqpClient}" - - implementation "io.nats:java-nats-streaming:${revNatsStreaming}" - - implementation "io.reactivex:rxjava:${revRxJava}" - - implementation "com.netflix.spectator:spectator-reg-metrics3:${revSpectator}" - implementation "com.netflix.spectator:spectator-reg-micrometer:${revSpectator}" - implementation "io.prometheus:simpleclient:${revPrometheus}" - implementation "io.micrometer:micrometer-registry-prometheus:${revMicrometer}" - - testImplementation 'org.springframework.boot:spring-boot-starter-web' - testImplementation "org.testcontainers:mockserver:${revTestContainer}" - testImplementation "org.mock-server:mockserver-client-java:${revMockServerClient}" - - testImplementation project(':conductor-common').sourceSets.test.output -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWithTTLWorkflowStatusListener.java b/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWithTTLWorkflowStatusListener.java deleted file mode 100644 index 69b50d6a38..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWithTTLWorkflowStatusListener.java +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.listener.archive; - -import java.util.concurrent.ScheduledThreadPoolExecutor; -import java.util.concurrent.TimeUnit; - -import javax.annotation.PreDestroy; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.core.dal.ExecutionDAOFacade; -import com.netflix.conductor.core.listener.WorkflowStatusListener; -import com.netflix.conductor.metrics.Monitors; -import com.netflix.conductor.model.WorkflowModel; - -public class ArchivingWithTTLWorkflowStatusListener implements WorkflowStatusListener { - - private static final Logger LOGGER = - LoggerFactory.getLogger(ArchivingWithTTLWorkflowStatusListener.class); - - private final ExecutionDAOFacade executionDAOFacade; - private final int archiveTTLSeconds; - private final int delayArchiveSeconds; - private final ScheduledThreadPoolExecutor scheduledThreadPoolExecutor; - - public ArchivingWithTTLWorkflowStatusListener( - ExecutionDAOFacade executionDAOFacade, ArchivingWorkflowListenerProperties properties) { - this.executionDAOFacade = executionDAOFacade; - this.archiveTTLSeconds = (int) properties.getTtlDuration().getSeconds(); - this.delayArchiveSeconds = properties.getWorkflowArchivalDelay(); - - this.scheduledThreadPoolExecutor = - new ScheduledThreadPoolExecutor( - properties.getDelayQueueWorkerThreadCount(), - (runnable, executor) -> { - LOGGER.warn( - "Request {} to delay archiving index dropped in executor {}", - runnable, - executor); - Monitors.recordDiscardedArchivalCount(); - }); - this.scheduledThreadPoolExecutor.setRemoveOnCancelPolicy(true); - } - - @PreDestroy - public void shutdownExecutorService() { - try { - LOGGER.info("Gracefully shutdown executor service"); - scheduledThreadPoolExecutor.shutdown(); - if (scheduledThreadPoolExecutor.awaitTermination( - delayArchiveSeconds, TimeUnit.SECONDS)) { - LOGGER.debug("tasks completed, shutting down"); - } else { - LOGGER.warn("Forcing shutdown after waiting for {} seconds", delayArchiveSeconds); - scheduledThreadPoolExecutor.shutdownNow(); - } - } catch (InterruptedException ie) { - LOGGER.warn( - "Shutdown interrupted, invoking shutdownNow on scheduledThreadPoolExecutor for delay queue"); - scheduledThreadPoolExecutor.shutdownNow(); - Thread.currentThread().interrupt(); - } - } - - @Override - public void onWorkflowCompleted(WorkflowModel workflow) { - LOGGER.info("Archiving workflow {} on completion ", workflow.getWorkflowId()); - if (delayArchiveSeconds > 0) { - scheduledThreadPoolExecutor.schedule( - new DelayArchiveWorkflow(workflow, executionDAOFacade), - delayArchiveSeconds, - TimeUnit.SECONDS); - } else { - this.executionDAOFacade.removeWorkflowWithExpiry( - workflow.getWorkflowId(), true, archiveTTLSeconds); - Monitors.recordWorkflowArchived(workflow.getWorkflowName(), workflow.getStatus()); - } - } - - @Override - public void onWorkflowTerminated(WorkflowModel workflow) { - LOGGER.info("Archiving workflow {} on termination", workflow.getWorkflowId()); - if (delayArchiveSeconds > 0) { - scheduledThreadPoolExecutor.schedule( - new DelayArchiveWorkflow(workflow, executionDAOFacade), - delayArchiveSeconds, - TimeUnit.SECONDS); - } else { - 
this.executionDAOFacade.removeWorkflowWithExpiry( - workflow.getWorkflowId(), true, archiveTTLSeconds); - Monitors.recordWorkflowArchived(workflow.getWorkflowName(), workflow.getStatus()); - } - } - - private class DelayArchiveWorkflow implements Runnable { - - private final String workflowId; - private final String workflowName; - private final WorkflowModel.Status status; - private final ExecutionDAOFacade executionDAOFacade; - - DelayArchiveWorkflow(WorkflowModel workflow, ExecutionDAOFacade executionDAOFacade) { - this.workflowId = workflow.getWorkflowId(); - this.workflowName = workflow.getWorkflowName(); - this.status = workflow.getStatus(); - this.executionDAOFacade = executionDAOFacade; - } - - @Override - public void run() { - try { - this.executionDAOFacade.removeWorkflowWithExpiry( - workflowId, true, archiveTTLSeconds); - LOGGER.info("Archived workflow {}", workflowId); - Monitors.recordWorkflowArchived(workflowName, status); - Monitors.recordArchivalDelayQueueSize( - scheduledThreadPoolExecutor.getQueue().size()); - } catch (Exception e) { - LOGGER.error("Unable to archive workflow: {}", workflowId, e); - } - } - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerConfiguration.java b/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerConfiguration.java deleted file mode 100644 index c98c0c173b..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerConfiguration.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
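The TTL listener deleted above defers removal through a ScheduledThreadPoolExecutor whose rejection handler only logs and counts the dropped request. Here is a stripped-down sketch of that delay-queue pattern, not taken from the patch; the archive(...) stand-in, the thread count, and the 60-second delay are illustrative assumptions.

    import java.util.concurrent.ScheduledThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class DelayedArchivalSketch {

        public static void main(String[] args) {
            // Rejected submissions (e.g. after shutdown) are only reported, never rethrown.
            ScheduledThreadPoolExecutor executor =
                    new ScheduledThreadPoolExecutor(
                            5, // mirrors delayQueueWorkerThreadCount
                            (runnable, exec) ->
                                    System.err.println("archival request dropped: " + runnable));
            executor.setRemoveOnCancelPolicy(true);

            // Defer the actual removal instead of doing it inline with workflow completion.
            executor.schedule(() -> archive("wf-123"), 60, TimeUnit.SECONDS);

            // shutdown() still runs already-scheduled delayed tasks by default.
            executor.shutdown();
        }

        static void archive(String workflowId) {
            // Stand-in for executionDAOFacade.removeWorkflowWithExpiry(workflowId, true, ttl).
            System.out.println("archived " + workflowId);
        }
    }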
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.listener.archive; - -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import com.netflix.conductor.core.dal.ExecutionDAOFacade; -import com.netflix.conductor.core.listener.WorkflowStatusListener; - -@Configuration -@EnableConfigurationProperties(ArchivingWorkflowListenerProperties.class) -@ConditionalOnProperty(name = "conductor.workflow-status-listener.type", havingValue = "archive") -public class ArchivingWorkflowListenerConfiguration { - - @Bean - public WorkflowStatusListener getWorkflowStatusListener( - ExecutionDAOFacade executionDAOFacade, ArchivingWorkflowListenerProperties properties) { - if (properties.getTtlDuration().getSeconds() > 0) { - return new ArchivingWithTTLWorkflowStatusListener(executionDAOFacade, properties); - } else { - return new ArchivingWorkflowStatusListener(executionDAOFacade); - } - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerProperties.java b/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerProperties.java deleted file mode 100644 index dfd57d3017..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowListenerProperties.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.listener.archive; - -import java.time.Duration; -import java.time.temporal.ChronoUnit; - -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.context.properties.ConfigurationProperties; -import org.springframework.boot.convert.DurationUnit; -import org.springframework.core.env.Environment; - -@ConfigurationProperties("conductor.workflow-status-listener.archival") -public class ArchivingWorkflowListenerProperties { - - private final Environment environment; - - @Autowired - public ArchivingWorkflowListenerProperties(Environment environment) { - this.environment = environment; - } - - /** - * The time to live in seconds for workflow archiving module. Currently, only RedisExecutionDAO - * supports this - */ - @DurationUnit(ChronoUnit.SECONDS) - private Duration ttlDuration = Duration.ZERO; - - /** The number of threads to process the delay queue in workflow archival */ - private int delayQueueWorkerThreadCount = 5; - - public Duration getTtlDuration() { - return ttlDuration; - } - - public void setTtlDuration(Duration ttlDuration) { - this.ttlDuration = ttlDuration; - } - - public int getDelayQueueWorkerThreadCount() { - return delayQueueWorkerThreadCount; - } - - public void setDelayQueueWorkerThreadCount(int delayQueueWorkerThreadCount) { - this.delayQueueWorkerThreadCount = delayQueueWorkerThreadCount; - } - - /** The time to delay the archival of workflow */ - public int getWorkflowArchivalDelay() { - return environment.getProperty( - "conductor.workflow-status-listener.archival.delaySeconds", - Integer.class, - environment.getProperty( - "conductor.app.asyncUpdateDelaySeconds", Integer.class, 60)); - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowStatusListener.java b/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowStatusListener.java deleted file mode 100644 index f1fe98cca8..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/listener/archive/ArchivingWorkflowStatusListener.java +++ /dev/null @@ -1,52 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.listener.archive; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.core.dal.ExecutionDAOFacade; -import com.netflix.conductor.core.listener.WorkflowStatusListener; -import com.netflix.conductor.metrics.Monitors; -import com.netflix.conductor.model.WorkflowModel; - -/** - * Provides default implementation of workflow archiving immediately after workflow is completed or - * terminated. - * - * @author pavel.halabala - */ -public class ArchivingWorkflowStatusListener implements WorkflowStatusListener { - - private static final Logger LOGGER = - LoggerFactory.getLogger(ArchivingWorkflowStatusListener.class); - private final ExecutionDAOFacade executionDAOFacade; - - public ArchivingWorkflowStatusListener(ExecutionDAOFacade executionDAOFacade) { - this.executionDAOFacade = executionDAOFacade; - } - - @Override - public void onWorkflowCompleted(WorkflowModel workflow) { - LOGGER.info("Archiving workflow {} on completion ", workflow.getWorkflowId()); - this.executionDAOFacade.removeWorkflow(workflow.getWorkflowId(), true); - Monitors.recordWorkflowArchived(workflow.getWorkflowName(), workflow.getStatus()); - } - - @Override - public void onWorkflowTerminated(WorkflowModel workflow) { - LOGGER.info("Archiving workflow {} on termination", workflow.getWorkflowId()); - this.executionDAOFacade.removeWorkflow(workflow.getWorkflowId(), true); - Monitors.recordWorkflowArchived(workflow.getWorkflowName(), workflow.getStatus()); - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisher.java b/contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisher.java deleted file mode 100644 index 5f85e876ee..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisher.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.listener.conductorqueue; - -import java.util.Collections; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.common.run.WorkflowSummary; -import com.netflix.conductor.core.events.queue.Message; -import com.netflix.conductor.core.listener.WorkflowStatusListener; -import com.netflix.conductor.dao.QueueDAO; -import com.netflix.conductor.model.WorkflowModel; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; - -/** - * Publishes a {@link Message} containing a {@link WorkflowSummary} to the undlerying {@link - * QueueDAO} implementation on a workflow completion or termination event. - */ -public class ConductorQueueStatusPublisher implements WorkflowStatusListener { - - private static final Logger LOGGER = - LoggerFactory.getLogger(ConductorQueueStatusPublisher.class); - private final QueueDAO queueDAO; - private final ObjectMapper objectMapper; - - private final String successStatusQueue; - private final String failureStatusQueue; - private final String finalizeStatusQueue; - - public ConductorQueueStatusPublisher( - QueueDAO queueDAO, - ObjectMapper objectMapper, - ConductorQueueStatusPublisherProperties properties) { - this.queueDAO = queueDAO; - this.objectMapper = objectMapper; - this.successStatusQueue = properties.getSuccessQueue(); - this.failureStatusQueue = properties.getFailureQueue(); - this.finalizeStatusQueue = properties.getFinalizeQueue(); - } - - @Override - public void onWorkflowCompleted(WorkflowModel workflow) { - LOGGER.info("Publishing callback of workflow {} on completion ", workflow.getWorkflowId()); - queueDAO.push(successStatusQueue, Collections.singletonList(workflowToMessage(workflow))); - } - - @Override - public void onWorkflowTerminated(WorkflowModel workflow) { - LOGGER.info("Publishing callback of workflow {} on termination", workflow.getWorkflowId()); - queueDAO.push(failureStatusQueue, Collections.singletonList(workflowToMessage(workflow))); - } - - @Override - public void onWorkflowFinalized(WorkflowModel workflow) { - LOGGER.info("Publishing callback of workflow {} on finalization", workflow.getWorkflowId()); - queueDAO.push(finalizeStatusQueue, Collections.singletonList(workflowToMessage(workflow))); - } - - private Message workflowToMessage(WorkflowModel workflowModel) { - String jsonWfSummary; - WorkflowSummary summary = new WorkflowSummary(workflowModel.toWorkflow()); - try { - jsonWfSummary = objectMapper.writeValueAsString(summary); - } catch (JsonProcessingException e) { - LOGGER.error( - "Failed to convert WorkflowSummary: {} to String. 
Exception: {}", summary, e); - throw new RuntimeException(e); - } - return new Message(workflowModel.getWorkflowId(), jsonWfSummary, null); - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherConfiguration.java b/contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherConfiguration.java deleted file mode 100644 index df4e694045..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherConfiguration.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.listener.conductorqueue; - -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import com.netflix.conductor.core.listener.WorkflowStatusListener; -import com.netflix.conductor.dao.QueueDAO; - -import com.fasterxml.jackson.databind.ObjectMapper; - -@Configuration -@EnableConfigurationProperties(ConductorQueueStatusPublisherProperties.class) -@ConditionalOnProperty( - name = "conductor.workflow-status-listener.type", - havingValue = "queue_publisher") -public class ConductorQueueStatusPublisherConfiguration { - - @Bean - public WorkflowStatusListener getWorkflowStatusListener( - QueueDAO queueDAO, - ConductorQueueStatusPublisherProperties properties, - ObjectMapper objectMapper) { - return new ConductorQueueStatusPublisher(queueDAO, objectMapper, properties); - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherProperties.java b/contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherProperties.java deleted file mode 100644 index ea9a53f743..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/listener/conductorqueue/ConductorQueueStatusPublisherProperties.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.listener.conductorqueue; - -import org.springframework.boot.context.properties.ConfigurationProperties; - -@ConfigurationProperties("conductor.workflow-status-listener.queue-publisher") -public class ConductorQueueStatusPublisherProperties { - - private String successQueue = "_callbackSuccessQueue"; - - private String failureQueue = "_callbackFailureQueue"; - - private String finalizeQueue = "_callbackFinalizeQueue"; - - public String getSuccessQueue() { - return successQueue; - } - - public void setSuccessQueue(String successQueue) { - this.successQueue = successQueue; - } - - public String getFailureQueue() { - return failureQueue; - } - - public void setFailureQueue(String failureQueue) { - this.failureQueue = failureQueue; - } - - public String getFinalizeQueue() { - return finalizeQueue; - } - - public void setFinalizeQueue(String finalizeQueue) { - this.finalizeQueue = finalizeQueue; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/metrics/DatadogMetricsConfiguration.java b/contribs/src/main/java/com/netflix/conductor/contribs/metrics/DatadogMetricsConfiguration.java deleted file mode 100644 index 32ca11edc1..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/metrics/DatadogMetricsConfiguration.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.metrics; - -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Configuration; - -import com.netflix.spectator.api.Spectator; -import com.netflix.spectator.micrometer.MicrometerRegistry; - -import io.micrometer.core.instrument.MeterRegistry; - -/** - * Metrics Datadog module, sending all metrics to a Datadog server. - * - *
Enable in config: conductor.metrics-datadog.enabled=true - * - *
Make sure your dependencies include both micrometer-registry-datadog & - * spring-boot-starter-actuator - */ -@ConditionalOnProperty(value = "conductor.metrics-datadog.enabled", havingValue = "true") -@Configuration -public class DatadogMetricsConfiguration { - - public DatadogMetricsConfiguration(MeterRegistry meterRegistry) { - final MicrometerRegistry metricsRegistry = new MicrometerRegistry(meterRegistry); - Spectator.globalRegistry().add(metricsRegistry); - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfiguration.java b/contribs/src/main/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfiguration.java deleted file mode 100644 index 33b66faffb..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfiguration.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.metrics; - -import java.time.Duration; -import java.util.concurrent.TimeUnit; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import com.codahale.metrics.MetricRegistry; -import com.codahale.metrics.Slf4jReporter; - -/** - * Metrics logging reporter, dumping all metrics into an Slf4J logger. - * - *
Enable in config: conductor.metrics-logger.enabled=true - * - *
additional config: conductor.metrics-logger.reportInterval=15s - */ -@ConditionalOnProperty(value = "conductor.metrics-logger.enabled", havingValue = "true") -@Configuration -public class LoggingMetricsConfiguration { - - private static final Logger LOGGER = LoggerFactory.getLogger(LoggingMetricsConfiguration.class); - - // Dedicated logger for metrics - // This way one can cleanly separate the metrics stream from rest of the logs - private static final Logger METRICS_LOGGER = LoggerFactory.getLogger("ConductorMetrics"); - - @Value("${conductor.metrics-logger.reportInterval:#{T(java.time.Duration).ofSeconds(30)}}") - private Duration reportInterval; - - @Bean - public Slf4jReporter getSl4jReporter(MetricRegistry metricRegistry) { - return new Slf4jReporterProvider(metricRegistry, reportInterval.getSeconds()).getReporter(); - } - - static class Slf4jReporterProvider { - - private final long metricsReportInterval; - private final MetricRegistry metrics3Registry; - private final Logger logger; - - Slf4jReporterProvider(MetricRegistry metricRegistry, long reportInterval) { - this(metricRegistry, METRICS_LOGGER, reportInterval); - } - - Slf4jReporterProvider( - MetricRegistry metricRegistry, Logger outputLogger, long metricsReportInterval) { - this.metrics3Registry = metricRegistry; - this.logger = outputLogger; - this.metricsReportInterval = metricsReportInterval; - } - - public Slf4jReporter getReporter() { - final Slf4jReporter reporter = - Slf4jReporter.forRegistry(metrics3Registry) - .outputTo(logger) - .convertRatesTo(TimeUnit.SECONDS) - .convertDurationsTo(TimeUnit.MILLISECONDS) - .build(); - - reporter.start(metricsReportInterval, TimeUnit.SECONDS); - LOGGER.info( - "Logging metrics reporter started, reporting every {} seconds", - metricsReportInterval); - return reporter; - } - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/metrics/MetricsRegistryConfiguration.java b/contribs/src/main/java/com/netflix/conductor/contribs/metrics/MetricsRegistryConfiguration.java deleted file mode 100644 index f28e1caa05..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/metrics/MetricsRegistryConfiguration.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.metrics; - -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import com.netflix.spectator.api.Clock; -import com.netflix.spectator.api.Spectator; -import com.netflix.spectator.metrics3.MetricsRegistry; - -import com.codahale.metrics.MetricRegistry; - -@ConditionalOnProperty(value = "conductor.metrics-logger.enabled", havingValue = "true") -@Configuration -public class MetricsRegistryConfiguration { - - public static final MetricRegistry METRIC_REGISTRY = new MetricRegistry(); - public static final MetricsRegistry METRICS_REGISTRY = - new MetricsRegistry(Clock.SYSTEM, METRIC_REGISTRY); - - static { - Spectator.globalRegistry().add(METRICS_REGISTRY); - } - - @Bean - public MetricRegistry metricRegistry() { - return METRIC_REGISTRY; - } - - @Bean - public MetricsRegistry metricsRegistry() { - return METRICS_REGISTRY; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/metrics/PrometheusMetricsConfiguration.java b/contribs/src/main/java/com/netflix/conductor/contribs/metrics/PrometheusMetricsConfiguration.java deleted file mode 100644 index f9a6c2ec7b..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/metrics/PrometheusMetricsConfiguration.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.metrics; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Configuration; - -import com.netflix.spectator.api.Spectator; -import com.netflix.spectator.micrometer.MicrometerRegistry; - -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.prometheus.PrometheusRenameFilter; - -/** - * Metrics prometheus module, sending all metrics to a Prometheus server. - * - *
Enable in config: conductor.metrics-prometheus.enabled=true - * - *
Make sure your dependencies include both spectator-reg-micrometer & - * spring-boot-starter-actuator - */ -@ConditionalOnProperty(value = "conductor.metrics-prometheus.enabled", havingValue = "true") -@Configuration -public class PrometheusMetricsConfiguration { - private static final Logger LOGGER = - LoggerFactory.getLogger(PrometheusMetricsConfiguration.class); - - public PrometheusMetricsConfiguration(MeterRegistry meterRegistry) { - LOGGER.info("Prometheus metrics module initialized"); - final MicrometerRegistry metricsRegistry = new MicrometerRegistry(meterRegistry); - meterRegistry.config().meterFilter(new PrometheusRenameFilter()); - Spectator.globalRegistry().add(metricsRegistry); - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPConnection.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPConnection.java deleted file mode 100644 index ab97c58e29..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPConnection.java +++ /dev/null @@ -1,391 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp; - -import java.io.IOException; -import java.util.Arrays; -import java.util.HashSet; -import java.util.Iterator; -import java.util.Map; -import java.util.Set; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.TimeoutException; -import java.util.stream.Collectors; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.contribs.queue.amqp.config.AMQPRetryPattern; -import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants; -import com.netflix.conductor.contribs.queue.amqp.util.ConnectionType; - -import com.rabbitmq.client.Address; -import com.rabbitmq.client.BlockedListener; -import com.rabbitmq.client.Channel; -import com.rabbitmq.client.Connection; -import com.rabbitmq.client.ConnectionFactory; -import com.rabbitmq.client.ShutdownListener; -import com.rabbitmq.client.ShutdownSignalException; - -public class AMQPConnection { - - private static Logger LOGGER = LoggerFactory.getLogger(AMQPConnection.class); - private volatile Connection publisherConnection = null; - private volatile Connection subscriberConnection = null; - private ConnectionFactory factory = null; - private Address[] addresses = null; - private static AMQPConnection amqpConnection = null; - private static final String PUBLISHER = "Publisher"; - private static final String SUBSCRIBER = "Subscriber"; - private static final Map> availableChannelPool = - new ConcurrentHashMap>(); - private static final Map subscriberReservedChannelPool = - new ConcurrentHashMap(); - private static AMQPRetryPattern retrySettings = null; - - private AMQPConnection() {} - - private AMQPConnection(final ConnectionFactory factory, final Address[] address) { - this.factory = factory; - this.addresses = address; - } - - public static synchronized AMQPConnection getInstance( - final ConnectionFactory factory, - final Address[] address, - final AMQPRetryPattern retrySettings) { - if (AMQPConnection.amqpConnection == null) { - AMQPConnection.amqpConnection = new AMQPConnection(factory, address); - } - AMQPConnection.retrySettings = retrySettings; - return AMQPConnection.amqpConnection; - } - - // Exposed for UT - public static void setAMQPConnection(AMQPConnection amqpConnection) { - AMQPConnection.amqpConnection = amqpConnection; - } - - public Address[] getAddresses() { - return addresses; - } - - private Connection createConnection(String connectionPrefix) { - int retryIndex = 1; - while (true) { - try { - Connection connection = - factory.newConnection( - addresses, System.getenv("HOSTNAME") + "-" + connectionPrefix); - if (connection == null || !connection.isOpen()) { - throw new RuntimeException("Failed to open connection"); - } - connection.addShutdownListener( - new ShutdownListener() { - @Override - public void shutdownCompleted(ShutdownSignalException cause) { - LOGGER.error( - "Received a shutdown exception for the connection {}. 
reason {} cause{}", - connection.getClientProvidedName(), - cause.getMessage(), - cause); - } - }); - connection.addBlockedListener( - new BlockedListener() { - @Override - public void handleUnblocked() throws IOException { - LOGGER.info( - "Connection {} is unblocked", - connection.getClientProvidedName()); - } - - @Override - public void handleBlocked(String reason) throws IOException { - LOGGER.error( - "Connection {} is blocked. reason: {}", - connection.getClientProvidedName(), - reason); - } - }); - return connection; - } catch (final IOException e) { - AMQPRetryPattern retry = retrySettings; - if (retry == null) { - final String error = - "IO error while connecting to " - + Arrays.stream(addresses) - .map(address -> address.toString()) - .collect(Collectors.joining(",")); - LOGGER.error(error, e); - throw new RuntimeException(error, e); - } - try { - retry.continueOrPropogate(e, retryIndex); - } catch (Exception ex) { - final String error = - "Retries completed. IO error while connecting to " - + Arrays.stream(addresses) - .map(address -> address.toString()) - .collect(Collectors.joining(",")); - LOGGER.error(error, e); - throw new RuntimeException(error, e); - } - retryIndex++; - } catch (final TimeoutException e) { - AMQPRetryPattern retry = retrySettings; - if (retry == null) { - final String error = - "Timeout while connecting to " - + Arrays.stream(addresses) - .map(address -> address.toString()) - .collect(Collectors.joining(",")); - LOGGER.error(error, e); - throw new RuntimeException(error, e); - } - try { - retry.continueOrPropogate(e, retryIndex); - } catch (Exception ex) { - final String error = - "Retries completed. Timeout while connecting to " - + Arrays.stream(addresses) - .map(address -> address.toString()) - .collect(Collectors.joining(",")); - LOGGER.error(error, e); - throw new RuntimeException(error, e); - } - retryIndex++; - } - } - } - - public Channel getOrCreateChannel(ConnectionType connectionType, String queueOrExchangeName) - throws Exception { - LOGGER.debug( - "Accessing the channel for queueOrExchange {} with type {} ", - queueOrExchangeName, - connectionType); - switch (connectionType) { - case SUBSCRIBER: - String subChnName = connectionType + ";" + queueOrExchangeName; - if (subscriberReservedChannelPool.containsKey(subChnName)) { - Channel locChn = subscriberReservedChannelPool.get(subChnName); - if (locChn != null && locChn.isOpen()) { - return locChn; - } - } - synchronized (this) { - if (subscriberConnection == null || !subscriberConnection.isOpen()) { - subscriberConnection = createConnection(SUBSCRIBER); - } - } - Channel subChn = borrowChannel(connectionType, subscriberConnection); - // Add the subscribed channels to Map to avoid messages being acknowledged on - // different from the subscribed one - subscriberReservedChannelPool.put(subChnName, subChn); - return subChn; - case PUBLISHER: - synchronized (this) { - if (publisherConnection == null || !publisherConnection.isOpen()) { - publisherConnection = createConnection(PUBLISHER); - } - } - return borrowChannel(connectionType, publisherConnection); - default: - return null; - } - } - - private Channel getOrCreateChannel(ConnectionType connType, Connection rmqConnection) { - // Channel creation is required - Channel locChn = null; - int retryIndex = 1; - while (true) { - try { - LOGGER.debug("Creating a channel for " + connType); - locChn = rmqConnection.createChannel(); - if (locChn == null || !locChn.isOpen()) { - throw new RuntimeException("Fail to open " + connType + " channel"); - } - 
locChn.addShutdownListener( - cause -> { - LOGGER.error( - connType + " Channel has been shutdown: {}", - cause.getMessage(), - cause); - }); - return locChn; - } catch (final IOException e) { - AMQPRetryPattern retry = retrySettings; - if (retry == null) { - throw new RuntimeException( - "Cannot open " - + connType - + " channel on " - + Arrays.stream(addresses) - .map(address -> address.toString()) - .collect(Collectors.joining(",")), - e); - } - try { - retry.continueOrPropogate(e, retryIndex); - } catch (Exception ex) { - throw new RuntimeException( - "Retries completed. Cannot open " - + connType - + " channel on " - + Arrays.stream(addresses) - .map(address -> address.toString()) - .collect(Collectors.joining(",")), - e); - } - retryIndex++; - } catch (final Exception e) { - AMQPRetryPattern retry = retrySettings; - if (retry == null) { - throw new RuntimeException( - "Cannot open " - + connType - + " channel on " - + Arrays.stream(addresses) - .map(address -> address.toString()) - .collect(Collectors.joining(",")), - e); - } - try { - retry.continueOrPropogate(e, retryIndex); - } catch (Exception ex) { - throw new RuntimeException( - "Retries completed. Cannot open " - + connType - + " channel on " - + Arrays.stream(addresses) - .map(address -> address.toString()) - .collect(Collectors.joining(",")), - e); - } - retryIndex++; - } - } - } - - public void close() { - LOGGER.info("Closing all connections and channels"); - try { - closeChannelsInMap(ConnectionType.PUBLISHER); - closeChannelsInMap(ConnectionType.SUBSCRIBER); - closeConnection(publisherConnection); - closeConnection(subscriberConnection); - } finally { - availableChannelPool.clear(); - publisherConnection = null; - subscriberConnection = null; - } - } - - private void closeChannelsInMap(ConnectionType conType) { - Set channels = availableChannelPool.get(conType); - if (channels != null && !channels.isEmpty()) { - Iterator itr = channels.iterator(); - while (itr.hasNext()) { - Channel channel = itr.next(); - closeChannel(channel); - } - channels.clear(); - } - } - - private void closeConnection(Connection connection) { - if (connection == null || !connection.isOpen()) { - LOGGER.warn("Connection is null or closed already. Not closing it again"); - } else { - try { - connection.close(); - } catch (Exception e) { - LOGGER.warn("Fail to close connection: {}", e.getMessage(), e); - } - } - } - - private void closeChannel(Channel channel) { - if (channel == null || !channel.isOpen()) { - LOGGER.warn("Channel is null or closed already. Not closing it again"); - } else { - try { - channel.close(); - } catch (Exception e) { - LOGGER.warn("Fail to close channel: {}", e.getMessage(), e); - } - } - } - - /** - * Gets the channel for specified connectionType. - * - * @param connectionType holds the multiple channels for different connection types for thread - * safe operation. 
- * @param rmqConnection publisher or subscriber connection instance - * @return channel instance - * @throws Exception - */ - private synchronized Channel borrowChannel( - ConnectionType connectionType, Connection rmqConnection) throws Exception { - if (!availableChannelPool.containsKey(connectionType)) { - Channel channel = getOrCreateChannel(connectionType, rmqConnection); - LOGGER.info(String.format(AMQPConstants.INFO_CHANNEL_CREATION_SUCCESS, connectionType)); - return channel; - } - Set channels = availableChannelPool.get(connectionType); - if (channels != null && channels.isEmpty()) { - Channel channel = getOrCreateChannel(connectionType, rmqConnection); - LOGGER.info(String.format(AMQPConstants.INFO_CHANNEL_CREATION_SUCCESS, connectionType)); - return channel; - } - Iterator itr = channels.iterator(); - while (itr.hasNext()) { - Channel channel = itr.next(); - if (channel != null && channel.isOpen()) { - itr.remove(); - LOGGER.info( - String.format(AMQPConstants.INFO_CHANNEL_BORROW_SUCCESS, connectionType)); - return channel; - } else { - itr.remove(); - } - } - Channel channel = getOrCreateChannel(connectionType, rmqConnection); - LOGGER.info(String.format(AMQPConstants.INFO_CHANNEL_RESET_SUCCESS, connectionType)); - return channel; - } - - /** - * Returns the channel to connection pool for specified connectionType. - * - * @param connectionType - * @param channel - * @throws Exception - */ - public synchronized void returnChannel(ConnectionType connectionType, Channel channel) - throws Exception { - if (channel == null || !channel.isOpen()) { - channel = null; // channel is reset. - } - Set channels = availableChannelPool.get(connectionType); - if (channels == null) { - channels = new HashSet(); - availableChannelPool.put(connectionType, channels); - } - channels.add(channel); - LOGGER.info(String.format(AMQPConstants.INFO_CHANNEL_RETURN_SUCCESS, connectionType)); - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueue.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueue.java deleted file mode 100644 index 78bc497d98..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueue.java +++ /dev/null @@ -1,809 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
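The class above hands out channels from a per-connection-type pool and expects callers to hand them back. A minimal sketch of that borrow/use/return discipline, assuming an existing AMQPConnection instance; the helper name publishOnce and its parameters are illustrative, not part of the original class:

    // Borrow a channel, use it, and always return it to the pool (mirrors publishMessage below).
    // Assumes imports: com.rabbitmq.client.Channel, plus the AMQPConnection/ConnectionType above.
    void publishOnce(AMQPConnection conn, String exchange, String routingKey, byte[] payload)
            throws Exception {
        Channel chn = null;
        try {
            chn = conn.getOrCreateChannel(ConnectionType.PUBLISHER, exchange);
            chn.basicPublish(exchange, routingKey, null, payload); // null = default properties
        } finally {
            if (chn != null) {
                conn.returnChannel(ConnectionType.PUBLISHER, chn); // return even on failure
            }
        }
    }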

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties; -import com.netflix.conductor.contribs.queue.amqp.config.AMQPRetryPattern; -import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants; -import com.netflix.conductor.contribs.queue.amqp.util.AMQPSettings; -import com.netflix.conductor.contribs.queue.amqp.util.ConnectionType; -import com.netflix.conductor.core.events.queue.Message; -import com.netflix.conductor.core.events.queue.ObservableQueue; -import com.netflix.conductor.metrics.Monitors; - -import com.google.common.collect.Maps; -import com.rabbitmq.client.AMQP; -import com.rabbitmq.client.Address; -import com.rabbitmq.client.Channel; -import com.rabbitmq.client.ConnectionFactory; -import com.rabbitmq.client.Consumer; -import com.rabbitmq.client.DefaultConsumer; -import com.rabbitmq.client.Envelope; -import com.rabbitmq.client.GetResponse; -import rx.Observable; -import rx.Subscriber; - -/** - * @author Ritu Parathody - */ -public class AMQPObservableQueue implements ObservableQueue { - - private static final Logger LOGGER = LoggerFactory.getLogger(AMQPObservableQueue.class); - - private final AMQPSettings settings; - private final AMQPRetryPattern retrySettings; - private final String QUEUE_TYPE = "x-queue-type"; - private final int batchSize; - private final boolean useExchange; - private int pollTimeInMS; - private AMQPConnection amqpConnection; - - protected LinkedBlockingQueue messages = new LinkedBlockingQueue<>(); - private volatile boolean running; - - public AMQPObservableQueue( - ConnectionFactory factory, - Address[] addresses, - boolean useExchange, - AMQPSettings settings, - AMQPRetryPattern retrySettings, - int batchSize, - int pollTimeInMS) { - if (factory == null) { - throw new IllegalArgumentException("Connection factory is undefined"); - } - if (addresses == null || addresses.length == 0) { - throw new IllegalArgumentException("Addresses are undefined"); - } - if (settings == null) { - throw new IllegalArgumentException("Settings are undefined"); - } - if (batchSize <= 0) { - throw new IllegalArgumentException("Batch size must be greater than 0"); - } - if (pollTimeInMS <= 0) { - throw new IllegalArgumentException("Poll time must be greater than 0 ms"); - } - this.useExchange = useExchange; - this.settings = settings; - this.batchSize = batchSize; - this.amqpConnection = AMQPConnection.getInstance(factory, addresses, retrySettings); - this.retrySettings = retrySettings; - this.setPollTimeInMS(pollTimeInMS); - } - - @Override - public Observable observe() { - Observable.OnSubscribe onSubscribe = null; - // This will enabled the messages to be processed one after the other as per the - // observable next behavior. 
- if (settings.isSequentialProcessing()) { - LOGGER.info("Subscribing for the message processing on schedule basis"); - receiveMessages(); - onSubscribe = - subscriber -> { - Observable interval = - Observable.interval(pollTimeInMS, TimeUnit.MILLISECONDS); - interval.flatMap( - (Long x) -> { - if (!isRunning()) { - LOGGER.debug( - "Component stopped, skip listening for messages from RabbitMQ"); - return Observable.from(Collections.emptyList()); - } else { - List available = new LinkedList<>(); - messages.drainTo(available); - - if (!available.isEmpty()) { - AtomicInteger count = new AtomicInteger(0); - StringBuilder buffer = new StringBuilder(); - available.forEach( - msg -> { - buffer.append(msg.getId()) - .append("=") - .append(msg.getPayload()); - count.incrementAndGet(); - - if (count.get() - < available.size()) { - buffer.append(","); - } - }); - LOGGER.info( - String.format( - "Batch from %s to conductor is %s", - settings - .getQueueOrExchangeName(), - buffer.toString())); - } - return Observable.from(available); - } - }) - .subscribe(subscriber::onNext, subscriber::onError); - }; - LOGGER.info("Subscribed for the message processing on schedule basis"); - } else { - onSubscribe = - subscriber -> { - LOGGER.info("Subscribing for the event based AMQP message processing"); - receiveMessages(subscriber); - LOGGER.info("Subscribed for the event based AMQP message processing"); - }; - } - return Observable.create(onSubscribe); - } - - @Override - public String getType() { - return useExchange ? AMQPConstants.AMQP_EXCHANGE_TYPE : AMQPConstants.AMQP_QUEUE_TYPE; - } - - @Override - public String getName() { - return settings.getEventName(); - } - - @Override - public String getURI() { - return settings.getQueueOrExchangeName(); - } - - public int getBatchSize() { - return batchSize; - } - - public AMQPSettings getSettings() { - return settings; - } - - public Address[] getAddresses() { - return amqpConnection.getAddresses(); - } - - @Override - public List ack(List messages) { - final List processedDeliveryTags = new ArrayList<>(); - for (final Message message : messages) { - int retryIndex = 1; - while (true) { - try { - LOGGER.info("ACK message with delivery tag {}", message.getReceipt()); - Channel chn = - amqpConnection.getOrCreateChannel( - ConnectionType.SUBSCRIBER, - getSettings().getQueueOrExchangeName()); - chn.basicAck(Long.parseLong(message.getReceipt()), false); - processedDeliveryTags.add(message.getReceipt()); - LOGGER.info("Ack'ed the message with delivery tag {}", message.getReceipt()); - break; - } catch (final Exception e) { - AMQPRetryPattern retry = retrySettings; - if (retry == null) { - LOGGER.error( - "Cannot ACK message with delivery tag {}", message.getReceipt(), e); - } - try { - retry.continueOrPropogate(e, retryIndex); - } catch (Exception ex) { - LOGGER.error( - "Retries completed. Cannot ACK message with delivery tag {}", - message.getReceipt(), - e); - } - retryIndex++; - } - } - } - return processedDeliveryTags; - } - - private static AMQP.BasicProperties buildBasicProperties( - final Message message, final AMQPSettings settings) { - return new AMQP.BasicProperties.Builder() - .messageId( - StringUtils.isEmpty(message.getId()) - ? UUID.randomUUID().toString() - : message.getId()) - .correlationId( - StringUtils.isEmpty(message.getReceipt()) - ? 
UUID.randomUUID().toString() - : message.getReceipt()) - .contentType(settings.getContentType()) - .contentEncoding(settings.getContentEncoding()) - .deliveryMode(settings.getDeliveryMode()) - .build(); - } - - private void publishMessage(Message message, String exchange, String routingKey) { - Channel chn = null; - int retryIndex = 1; - while (true) { - try { - final String payload = message.getPayload(); - chn = - amqpConnection.getOrCreateChannel( - ConnectionType.PUBLISHER, getSettings().getQueueOrExchangeName()); - chn.basicPublish( - exchange, - routingKey, - buildBasicProperties(message, settings), - payload.getBytes(settings.getContentEncoding())); - LOGGER.info(String.format("Published message to %s: %s", exchange, payload)); - break; - } catch (Exception ex) { - AMQPRetryPattern retry = retrySettings; - if (retry == null) { - LOGGER.error( - "Failed to publish message {} to {}", - message.getPayload(), - exchange, - ex); - throw new RuntimeException(ex); - } - try { - retry.continueOrPropogate(ex, retryIndex); - } catch (Exception e) { - LOGGER.error( - "Retries completed. Failed to publish message {} to {}", - message.getPayload(), - exchange, - ex); - throw new RuntimeException(ex); - } - retryIndex++; - } finally { - if (chn != null) { - try { - amqpConnection.returnChannel(ConnectionType.PUBLISHER, chn); - } catch (Exception e) { - LOGGER.error( - "Failed to return the channel of {}. {}", - ConnectionType.PUBLISHER, - e); - } - } - } - } - } - - @Override - public void publish(List messages) { - try { - final String exchange, routingKey; - if (useExchange) { - // Use exchange + routing key for publishing - getOrCreateExchange( - ConnectionType.PUBLISHER, - settings.getQueueOrExchangeName(), - settings.getExchangeType(), - settings.isDurable(), - settings.autoDelete(), - settings.getArguments()); - exchange = settings.getQueueOrExchangeName(); - routingKey = settings.getRoutingKey(); - } else { - // Use queue for publishing - final AMQP.Queue.DeclareOk declareOk = - getOrCreateQueue( - ConnectionType.PUBLISHER, - settings.getQueueOrExchangeName(), - settings.isDurable(), - settings.isExclusive(), - settings.autoDelete(), - settings.getArguments()); - exchange = StringUtils.EMPTY; // Empty exchange name for queue - routingKey = declareOk.getQueue(); // Routing name is the name of queue - } - messages.forEach(message -> publishMessage(message, exchange, routingKey)); - } catch (final RuntimeException ex) { - throw ex; - } catch (final Exception ex) { - LOGGER.error("Failed to publish messages: {}", ex.getMessage(), ex); - throw new RuntimeException(ex); - } - } - - @Override - public void setUnackTimeout(Message message, long unackTimeout) { - throw new UnsupportedOperationException(); - } - - @Override - public long size() { - Channel chn = null; - try { - chn = - amqpConnection.getOrCreateChannel( - ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName()); - return chn.messageCount(settings.getQueueOrExchangeName()); - } catch (final Exception e) { - throw new RuntimeException(e); - } finally { - if (chn != null) { - try { - amqpConnection.returnChannel(ConnectionType.SUBSCRIBER, chn); - } catch (Exception e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } - } - } - } - - @Override - public void close() { - amqpConnection.close(); - } - - @Override - public void start() { - LOGGER.info( - "Started listening to {}:{}", - getClass().getSimpleName(), - settings.getQueueOrExchangeName()); - running = true; - } - - @Override - public void stop() { - 
LOGGER.info( - "Stopped listening to {}:{}", - getClass().getSimpleName(), - settings.getQueueOrExchangeName()); - running = false; - } - - @Override - public boolean isRunning() { - return running; - } - - public static class Builder { - - private final Address[] addresses; - private final int batchSize; - private final int pollTimeInMS; - private final ConnectionFactory factory; - private final AMQPEventQueueProperties properties; - - public Builder(AMQPEventQueueProperties properties) { - this.properties = properties; - this.addresses = buildAddressesFromHosts(); - this.factory = buildConnectionFactory(); - // messages polling settings - this.batchSize = properties.getBatchSize(); - this.pollTimeInMS = (int) properties.getPollTimeDuration().toMillis(); - } - - private Address[] buildAddressesFromHosts() { - // Read hosts from config - final String hosts = properties.getHosts(); - if (StringUtils.isEmpty(hosts)) { - throw new IllegalArgumentException("Hosts are undefined"); - } - return Address.parseAddresses(hosts); - } - - private ConnectionFactory buildConnectionFactory() { - final ConnectionFactory factory = new ConnectionFactory(); - // Get rabbitmq username from config - final String username = properties.getUsername(); - if (StringUtils.isEmpty(username)) { - throw new IllegalArgumentException("Username is null or empty"); - } else { - factory.setUsername(username); - } - // Get rabbitmq password from config - final String password = properties.getPassword(); - if (StringUtils.isEmpty(password)) { - throw new IllegalArgumentException("Password is null or empty"); - } else { - factory.setPassword(password); - } - // Get vHost from config - final String virtualHost = properties.getVirtualHost(); - ; - if (StringUtils.isEmpty(virtualHost)) { - throw new IllegalArgumentException("Virtual host is null or empty"); - } else { - factory.setVirtualHost(virtualHost); - } - // Get server port from config - final int port = properties.getPort(); - if (port <= 0) { - throw new IllegalArgumentException("Port must be greater than 0"); - } else { - factory.setPort(port); - } - final boolean useNio = properties.isUseNio(); - if (useNio) { - factory.useNio(); - } - factory.setConnectionTimeout(properties.getConnectionTimeoutInMilliSecs()); - factory.setRequestedHeartbeat(properties.getRequestHeartbeatTimeoutInSecs()); - factory.setNetworkRecoveryInterval(properties.getNetworkRecoveryIntervalInMilliSecs()); - factory.setHandshakeTimeout(properties.getHandshakeTimeoutInMilliSecs()); - factory.setAutomaticRecoveryEnabled(true); - factory.setTopologyRecoveryEnabled(true); - factory.setRequestedChannelMax(properties.getMaxChannelCount()); - return factory; - } - - public AMQPObservableQueue build(final boolean useExchange, final String queueURI) { - final AMQPSettings settings = new AMQPSettings(properties).fromURI(queueURI); - final AMQPRetryPattern retrySettings = - new AMQPRetryPattern( - properties.getLimit(), properties.getDuration(), properties.getType()); - return new AMQPObservableQueue( - factory, - addresses, - useExchange, - settings, - retrySettings, - batchSize, - pollTimeInMS); - } - } - - private AMQP.Exchange.DeclareOk getOrCreateExchange(ConnectionType connectionType) - throws Exception { - return getOrCreateExchange( - connectionType, - settings.getQueueOrExchangeName(), - settings.getExchangeType(), - settings.isDurable(), - settings.autoDelete(), - settings.getArguments()); - } - - private AMQP.Exchange.DeclareOk getOrCreateExchange( - ConnectionType connectionType, - String name, - 
final String type, - final boolean isDurable, - final boolean autoDelete, - final Map arguments) - throws Exception { - if (StringUtils.isEmpty(name)) { - throw new RuntimeException("Exchange name is undefined"); - } - if (StringUtils.isEmpty(type)) { - throw new RuntimeException("Exchange type is undefined"); - } - Channel chn = null; - try { - LOGGER.debug("Creating exchange {} of type {}", name, type); - chn = - amqpConnection.getOrCreateChannel( - connectionType, getSettings().getQueueOrExchangeName()); - return chn.exchangeDeclare(name, type, isDurable, autoDelete, arguments); - } catch (final Exception e) { - LOGGER.warn("Failed to create exchange {} of type {}", name, type, e); - throw e; - } finally { - if (chn != null) { - try { - amqpConnection.returnChannel(connectionType, chn); - } catch (Exception e) { - LOGGER.error("Failed to return the channel of {}. {}", connectionType, e); - } - } - } - } - - private AMQP.Queue.DeclareOk getOrCreateQueue(ConnectionType connectionType) throws Exception { - return getOrCreateQueue( - connectionType, - settings.getQueueOrExchangeName(), - settings.isDurable(), - settings.isExclusive(), - settings.autoDelete(), - settings.getArguments()); - } - - private AMQP.Queue.DeclareOk getOrCreateQueue( - ConnectionType connectionType, - final String name, - final boolean isDurable, - final boolean isExclusive, - final boolean autoDelete, - final Map arguments) - throws Exception { - if (StringUtils.isEmpty(name)) { - throw new RuntimeException("Queue name is undefined"); - } - arguments.put(QUEUE_TYPE, settings.getQueueType()); - Channel chn = null; - try { - LOGGER.debug("Creating queue {}", name); - chn = - amqpConnection.getOrCreateChannel( - connectionType, getSettings().getQueueOrExchangeName()); - return chn.queueDeclare(name, isDurable, isExclusive, autoDelete, arguments); - } catch (final Exception e) { - LOGGER.warn("Failed to create queue {}", name, e); - throw e; - } finally { - if (chn != null) { - try { - amqpConnection.returnChannel(connectionType, chn); - } catch (Exception e) { - LOGGER.error("Failed to return the channel of {}. 
{}", connectionType, e); - } - } - } - } - - private static Message asMessage(AMQPSettings settings, GetResponse response) throws Exception { - if (response == null) { - return null; - } - final Message message = new Message(); - message.setId(response.getProps().getMessageId()); - message.setPayload(new String(response.getBody(), settings.getContentEncoding())); - message.setReceipt(String.valueOf(response.getEnvelope().getDeliveryTag())); - return message; - } - - private void receiveMessagesFromQueue(String queueName) throws Exception { - LOGGER.debug("Accessing channel for queue {}", queueName); - - Consumer consumer = - new DefaultConsumer( - amqpConnection.getOrCreateChannel( - ConnectionType.SUBSCRIBER, - getSettings().getQueueOrExchangeName())) { - - @Override - public void handleDelivery( - final String consumerTag, - final Envelope envelope, - final AMQP.BasicProperties properties, - final byte[] body) - throws IOException { - try { - Message message = - asMessage( - settings, - new GetResponse( - envelope, properties, body, Integer.MAX_VALUE)); - if (message != null) { - if (LOGGER.isDebugEnabled()) { - LOGGER.debug( - "Got message with ID {} and receipt {}", - message.getId(), - message.getReceipt()); - } - messages.add(message); - LOGGER.info("receiveMessagesFromQueue- End method {}", messages); - } - } catch (InterruptedException e) { - LOGGER.error( - "Issue in handling the mesages for the subscriber with consumer tag {}. {}", - consumerTag, - e); - Thread.currentThread().interrupt(); - } catch (Exception e) { - LOGGER.error( - "Issue in handling the mesages for the subscriber with consumer tag {}. {}", - consumerTag, - e); - } - } - - public void handleCancel(String consumerTag) throws IOException { - LOGGER.error( - "Recieved a consumer cancel notification for subscriber {}", - consumerTag); - } - }; - - amqpConnection - .getOrCreateChannel( - ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName()) - .basicConsume(queueName, false, consumer); - Monitors.recordEventQueueMessagesProcessed(getType(), queueName, messages.size()); - } - - private void receiveMessagesFromQueue(String queueName, Subscriber subscriber) - throws Exception { - LOGGER.debug("Accessing channel for queue {}", queueName); - - Consumer consumer = - new DefaultConsumer( - amqpConnection.getOrCreateChannel( - ConnectionType.SUBSCRIBER, - getSettings().getQueueOrExchangeName())) { - - @Override - public void handleDelivery( - final String consumerTag, - final Envelope envelope, - final AMQP.BasicProperties properties, - final byte[] body) - throws IOException { - try { - Message message = - asMessage( - settings, - new GetResponse( - envelope, properties, body, Integer.MAX_VALUE)); - if (message == null) { - return; - } - LOGGER.info( - "Got message with ID {} and receipt {}", - message.getId(), - message.getReceipt()); - LOGGER.debug("Message content {}", message); - // Not using thread-pool here as the number of concurrent threads are - // controlled - // by the number of messages delivery using pre-fetch count in RabbitMQ - Thread newThread = - new Thread( - () -> { - LOGGER.info( - "Spawning a new thread for message with ID {}", - message.getId()); - subscriber.onNext(message); - }); - newThread.start(); - } catch (InterruptedException e) { - LOGGER.error( - "Issue in handling the mesages for the subscriber with consumer tag {}. 
{}", - consumerTag, - e); - Thread.currentThread().interrupt(); - } catch (Exception e) { - LOGGER.error( - "Issue in handling the mesages for the subscriber with consumer tag {}. {}", - consumerTag, - e); - } - } - - public void handleCancel(String consumerTag) throws IOException { - LOGGER.error( - "Recieved a consumer cancel notification for subscriber {}", - consumerTag); - } - }; - amqpConnection - .getOrCreateChannel( - ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName()) - .basicConsume(queueName, false, consumer); - } - - protected void receiveMessages() { - try { - amqpConnection - .getOrCreateChannel( - ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName()) - .basicQos(batchSize); - String queueName; - if (useExchange) { - // Consume messages from an exchange - getOrCreateExchange(ConnectionType.SUBSCRIBER); - /* - * Create queue if not present based on the settings provided in the queue URI - * or configuration properties. Sample URI format: - * amqp-exchange:myExchange?exchangeType=topic&routingKey=myRoutingKey&exclusive - * =false&autoDelete=false&durable=true Default settings if not provided in the - * queue URI or properties: isDurable: true, autoDelete: false, isExclusive: - * false The same settings are currently used during creation of exchange as - * well as queue. TODO: This can be enhanced further to get the settings - * separately for exchange and queue from the URI - */ - final AMQP.Queue.DeclareOk declareOk = - getOrCreateQueue( - ConnectionType.SUBSCRIBER, - String.format("bound_to_%s", settings.getQueueOrExchangeName()), - settings.isDurable(), - settings.isExclusive(), - settings.autoDelete(), - Maps.newHashMap()); - // Bind the declared queue to exchange - queueName = declareOk.getQueue(); - amqpConnection - .getOrCreateChannel( - ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName()) - .queueBind( - queueName, - settings.getQueueOrExchangeName(), - settings.getRoutingKey()); - } else { - // Consume messages from a queue - queueName = getOrCreateQueue(ConnectionType.SUBSCRIBER).getQueue(); - } - // Consume messages - LOGGER.info("Consuming from queue {}", queueName); - receiveMessagesFromQueue(queueName); - } catch (Exception exception) { - LOGGER.error("Exception while getting messages from RabbitMQ", exception); - Monitors.recordObservableQMessageReceivedErrors(getType()); - } - } - - protected void receiveMessages(Subscriber subscriber) { - try { - amqpConnection - .getOrCreateChannel( - ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName()) - .basicQos(batchSize); - String queueName; - if (useExchange) { - // Consume messages from an exchange - getOrCreateExchange(ConnectionType.SUBSCRIBER); - /* - * Create queue if not present based on the settings provided in the queue URI - * or configuration properties. Sample URI format: - * amqp-exchange:myExchange?exchangeType=topic&routingKey=myRoutingKey&exclusive - * =false&autoDelete=false&durable=true Default settings if not provided in the - * queue URI or properties: isDurable: true, autoDelete: false, isExclusive: - * false The same settings are currently used during creation of exchange as - * well as queue. 
TODO: This can be enhanced further to get the settings - * separately for exchange and queue from the URI - */ - final AMQP.Queue.DeclareOk declareOk = - getOrCreateQueue( - ConnectionType.SUBSCRIBER, - String.format("bound_to_%s", settings.getQueueOrExchangeName()), - settings.isDurable(), - settings.isExclusive(), - settings.autoDelete(), - Maps.newHashMap()); - // Bind the declared queue to exchange - queueName = declareOk.getQueue(); - amqpConnection - .getOrCreateChannel( - ConnectionType.SUBSCRIBER, getSettings().getQueueOrExchangeName()) - .queueBind( - queueName, - settings.getQueueOrExchangeName(), - settings.getRoutingKey()); - } else { - // Consume messages from a queue - queueName = getOrCreateQueue(ConnectionType.SUBSCRIBER).getQueue(); - } - // Consume messages - LOGGER.info("Consuming from queue {}", queueName); - receiveMessagesFromQueue(queueName, subscriber); - } catch (Exception exception) { - LOGGER.error("Exception while getting messages from RabbitMQ", exception); - Monitors.recordObservableQMessageReceivedErrors(getType()); - } - } - - public int getPollTimeInMS() { - return pollTimeInMS; - } - - public void setPollTimeInMS(int pollTimeInMS) { - this.pollTimeInMS = pollTimeInMS; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueConfiguration.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueConfiguration.java deleted file mode 100644 index 9691b3c6eb..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueConfiguration.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
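A hedged sketch of how a caller might drive the queue class above: build it from properties, start it, and subscribe to observe(). Conductor's event machinery does this wiring internally; the host and credentials are illustrative only:

    AMQPEventQueueProperties props = new AMQPEventQueueProperties(); // defined later in this patch
    props.setHosts("localhost"); // illustrative
    props.setUsername("guest");  // illustrative
    props.setPassword("guest");  // illustrative
    AMQPObservableQueue queue =
            new AMQPObservableQueue.Builder(props).build(false, "amqp_queue:myQueue");
    queue.start(); // marks the queue running; the sequential branch of observe() checks isRunning()
    queue.observe()
            .subscribe(
                    msg -> System.out.println("received " + msg.getId()),
                    err -> System.err.println("queue error: " + err.getMessage()));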

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp.config; - -import java.util.HashMap; -import java.util.Map; - -import org.apache.commons.lang3.StringUtils; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import com.netflix.conductor.contribs.queue.amqp.AMQPObservableQueue.Builder; -import com.netflix.conductor.core.config.ConductorProperties; -import com.netflix.conductor.core.events.EventQueueProvider; -import com.netflix.conductor.core.events.queue.ObservableQueue; -import com.netflix.conductor.model.TaskModel.Status; - -@Configuration(proxyBeanMethods = false) -@EnableConfigurationProperties(AMQPEventQueueProperties.class) -@ConditionalOnProperty(name = "conductor.event-queues.amqp.enabled", havingValue = "true") -public class AMQPEventQueueConfiguration { - - private enum QUEUE_TYPE { - AMQP_QUEUE("amqp_queue"), - AMQP_EXCHANGE("amqp_exchange"); - - private final String type; - - QUEUE_TYPE(String type) { - this.type = type; - } - - public String getType() { - return type; - } - } - - @Bean - public EventQueueProvider amqpEventQueueProvider(AMQPEventQueueProperties properties) { - return new AMQPEventQueueProvider(properties, QUEUE_TYPE.AMQP_QUEUE.getType(), false); - } - - @Bean - public EventQueueProvider amqpExchangeEventQueueProvider(AMQPEventQueueProperties properties) { - return new AMQPEventQueueProvider(properties, QUEUE_TYPE.AMQP_EXCHANGE.getType(), true); - } - - @ConditionalOnProperty(name = "conductor.default-event-queue.type", havingValue = "amqp") - @Bean - public Map getQueues( - ConductorProperties conductorProperties, AMQPEventQueueProperties properties) { - String stack = ""; - if (conductorProperties.getStack() != null && conductorProperties.getStack().length() > 0) { - stack = conductorProperties.getStack() + "_"; - } - final boolean useExchange = properties.isUseExchange(); - - Status[] statuses = new Status[] {Status.COMPLETED, Status.FAILED}; - Map queues = new HashMap<>(); - for (Status status : statuses) { - String queuePrefix = - StringUtils.isBlank(properties.getListenerQueuePrefix()) - ? conductorProperties.getAppId() + "_amqp_notify_" + stack - : properties.getListenerQueuePrefix(); - - String queueName = queuePrefix + status.name(); - - final ObservableQueue queue = new Builder(properties).build(useExchange, queueName); - queues.put(status, queue); - } - - return queues; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProperties.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProperties.java deleted file mode 100644 index 335e5de277..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProperties.java +++ /dev/null @@ -1,303 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp.config; - -import java.time.Duration; - -import org.springframework.boot.context.properties.ConfigurationProperties; - -import com.netflix.conductor.contribs.queue.amqp.util.RetryType; - -import com.rabbitmq.client.AMQP.PROTOCOL; -import com.rabbitmq.client.ConnectionFactory; - -@ConfigurationProperties("conductor.event-queues.amqp") -public class AMQPEventQueueProperties { - - private int batchSize = 1; - - private Duration pollTimeDuration = Duration.ofMillis(100); - - private String hosts = ConnectionFactory.DEFAULT_HOST; - - private String username = ConnectionFactory.DEFAULT_USER; - - private String password = ConnectionFactory.DEFAULT_PASS; - - private String virtualHost = ConnectionFactory.DEFAULT_VHOST; - - private int port = PROTOCOL.PORT; - - private int connectionTimeoutInMilliSecs = 180000; - private int networkRecoveryIntervalInMilliSecs = 5000; - private int requestHeartbeatTimeoutInSecs = 30; - private int handshakeTimeoutInMilliSecs = 180000; - private int maxChannelCount = 5000; - private int limit = 50; - private int duration = 1000; - private RetryType retryType = RetryType.REGULARINTERVALS; - - public int getLimit() { - return limit; - } - - public void setLimit(int limit) { - this.limit = limit; - } - - public int getDuration() { - return duration; - } - - public void setDuration(int duration) { - this.duration = duration; - } - - public RetryType getType() { - return retryType; - } - - public void setType(RetryType type) { - this.retryType = type; - } - - public int getConnectionTimeoutInMilliSecs() { - return connectionTimeoutInMilliSecs; - } - - public void setConnectionTimeoutInMilliSecs(int connectionTimeoutInMilliSecs) { - this.connectionTimeoutInMilliSecs = connectionTimeoutInMilliSecs; - } - - public int getHandshakeTimeoutInMilliSecs() { - return handshakeTimeoutInMilliSecs; - } - - public void setHandshakeTimeoutInMilliSecs(int handshakeTimeoutInMilliSecs) { - this.handshakeTimeoutInMilliSecs = handshakeTimeoutInMilliSecs; - } - - public int getMaxChannelCount() { - return maxChannelCount; - } - - public void setMaxChannelCount(int maxChannelCount) { - this.maxChannelCount = maxChannelCount; - } - - private boolean useNio = false; - - private boolean durable = true; - - private boolean exclusive = false; - - private boolean autoDelete = false; - - private String contentType = "application/json"; - - private String contentEncoding = "UTF-8"; - - private String exchangeType = "topic"; - - private String queueType = "classic"; - - private boolean sequentialMsgProcessing = true; - - private int deliveryMode = 2; - - private boolean useExchange = true; - - private String listenerQueuePrefix = ""; - - public int getBatchSize() { - return batchSize; - } - - public void setBatchSize(int batchSize) { - this.batchSize = batchSize; - } - - public Duration getPollTimeDuration() { - return pollTimeDuration; - } - - public void setPollTimeDuration(Duration pollTimeDuration) { - this.pollTimeDuration = pollTimeDuration; - } - - public String getHosts() { - return hosts; - } - - public void setHosts(String hosts) { - this.hosts = hosts; - } - - public String getUsername() { - return username; - } - - public 
void setUsername(String username) { - this.username = username; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public String getVirtualHost() { - return virtualHost; - } - - public void setVirtualHost(String virtualHost) { - this.virtualHost = virtualHost; - } - - public int getPort() { - return port; - } - - public void setPort(int port) { - this.port = port; - } - - public boolean isUseNio() { - return useNio; - } - - public void setUseNio(boolean useNio) { - this.useNio = useNio; - } - - public boolean isDurable() { - return durable; - } - - public void setDurable(boolean durable) { - this.durable = durable; - } - - public boolean isExclusive() { - return exclusive; - } - - public void setExclusive(boolean exclusive) { - this.exclusive = exclusive; - } - - public boolean isAutoDelete() { - return autoDelete; - } - - public void setAutoDelete(boolean autoDelete) { - this.autoDelete = autoDelete; - } - - public String getContentType() { - return contentType; - } - - public void setContentType(String contentType) { - this.contentType = contentType; - } - - public String getContentEncoding() { - return contentEncoding; - } - - public void setContentEncoding(String contentEncoding) { - this.contentEncoding = contentEncoding; - } - - public String getExchangeType() { - return exchangeType; - } - - public void setExchangeType(String exchangeType) { - this.exchangeType = exchangeType; - } - - public int getDeliveryMode() { - return deliveryMode; - } - - public void setDeliveryMode(int deliveryMode) { - this.deliveryMode = deliveryMode; - } - - public boolean isUseExchange() { - return useExchange; - } - - public void setUseExchange(boolean useExchange) { - this.useExchange = useExchange; - } - - public String getListenerQueuePrefix() { - return listenerQueuePrefix; - } - - public void setListenerQueuePrefix(String listenerQueuePrefix) { - this.listenerQueuePrefix = listenerQueuePrefix; - } - - public String getQueueType() { - return queueType; - } - - /** - * @param queueType Supports two queue types, 'classic' and 'quorum'. Classic will be - * deprecated in 2022 and its usage is discouraged by the RabbitMQ community, so an enum - * type is not used here to hold the different values. - */ - public void setQueueType(String queueType) { - this.queueType = queueType; - } - - /** - * @return the sequentialMsgProcessing - */ - public boolean isSequentialMsgProcessing() { - return sequentialMsgProcessing; - } - - /** - * @param sequentialMsgProcessing the sequentialMsgProcessing to set. Supports sequential and - * parallel message processing capabilities. In parallel message processing, the number of - * threads is controlled by the batch size. No thread control or execution framework is - * required here, as threads are limited and short-lived. 
- */ - public void setSequentialMsgProcessing(boolean sequentialMsgProcessing) { - this.sequentialMsgProcessing = sequentialMsgProcessing; - } - - public int getNetworkRecoveryIntervalInMilliSecs() { - return networkRecoveryIntervalInMilliSecs; - } - - public void setNetworkRecoveryIntervalInMilliSecs(int networkRecoveryIntervalInMilliSecs) { - this.networkRecoveryIntervalInMilliSecs = networkRecoveryIntervalInMilliSecs; - } - - public int getRequestHeartbeatTimeoutInSecs() { - return requestHeartbeatTimeoutInSecs; - } - - public void setRequestHeartbeatTimeoutInSecs(int requestHeartbeatTimeoutInSecs) { - this.requestHeartbeatTimeoutInSecs = requestHeartbeatTimeoutInSecs; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProvider.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProvider.java deleted file mode 100644 index 7bf36cdd7f..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPEventQueueProvider.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
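The fields above bind under the conductor.event-queues.amqp prefix declared by @ConfigurationProperties. A hedged application.properties sketch (key names follow Spring's relaxed binding of the field names; values are examples, not recommendations), with the module enabled via the @ConditionalOnProperty gate on AMQPEventQueueConfiguration above:

    conductor.event-queues.amqp.enabled=true
    conductor.event-queues.amqp.hosts=rabbit-1:5672,rabbit-2:5672
    conductor.event-queues.amqp.username=conductor
    conductor.event-queues.amqp.password=changeme
    conductor.event-queues.amqp.virtual-host=/
    conductor.event-queues.amqp.batch-size=10
    conductor.event-queues.amqp.poll-time-duration=100ms
    conductor.event-queues.amqp.use-exchange=true
    conductor.event-queues.amqp.queue-type=quorum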

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp.config; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.lang.NonNull; - -import com.netflix.conductor.contribs.queue.amqp.AMQPObservableQueue; -import com.netflix.conductor.contribs.queue.amqp.AMQPObservableQueue.Builder; -import com.netflix.conductor.core.events.EventQueueProvider; -import com.netflix.conductor.core.events.queue.ObservableQueue; - -/** - * @author Ritu Parathody - */ -public class AMQPEventQueueProvider implements EventQueueProvider { - - private static final Logger LOGGER = LoggerFactory.getLogger(AMQPEventQueueProvider.class); - protected Map queues = new ConcurrentHashMap<>(); - private final boolean useExchange; - private final AMQPEventQueueProperties properties; - private final String queueType; - - public AMQPEventQueueProvider( - AMQPEventQueueProperties properties, String queueType, boolean useExchange) { - this.properties = properties; - this.queueType = queueType; - this.useExchange = useExchange; - } - - @Override - public String getQueueType() { - return queueType; - } - - @Override - @NonNull - public ObservableQueue getQueue(String queueURI) { - if (LOGGER.isInfoEnabled()) { - LOGGER.info("Retrieve queue with URI {}", queueURI); - } - // Build the queue with the inner Builder class of AMQPObservableQueue - return queues.computeIfAbsent(queueURI, q -> new Builder(properties).build(useExchange, q)); - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPRetryPattern.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPRetryPattern.java deleted file mode 100644 index 3890a9980b..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/config/AMQPRetryPattern.java +++ /dev/null @@ -1,54 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp.config; - -import com.netflix.conductor.contribs.queue.amqp.util.RetryType; - -public class AMQPRetryPattern { - - private int limit = 50; - private int duration = 1000; - private RetryType type = RetryType.REGULARINTERVALS; - - public AMQPRetryPattern() {} - - public AMQPRetryPattern(int limit, int duration, RetryType type) { - this.limit = limit; - this.duration = duration; - this.type = type; - } - - /** - * Sleeps for the computed retry interval if the retry index is within the allowed limit; - * otherwise the given exception is rethrown. - * - * @throws Exception the original exception, once the retry limit has been exceeded - */ - public void continueOrPropogate(Exception ex, int retryIndex) throws Exception { - if (retryIndex > limit) { - throw ex; - } - // Regular intervals is the default - long waitDuration = duration; - if (type == RetryType.INCREMENTALINTERVALS) { - waitDuration = duration * retryIndex; - } else if (type == RetryType.EXPONENTIALBACKOFF) { - waitDuration = (long) Math.pow(2, retryIndex) * duration; - } - try { - Thread.sleep(waitDuration); - } catch (InterruptedException ignored) { - Thread.currentThread().interrupt(); - } - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConfigurations.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConfigurations.java deleted file mode 100644 index d5ddb6f0f0..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConfigurations.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
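For concreteness, with the default duration of 1000 ms the waits computed by continueOrPropogate above are: REGULARINTERVALS 1000 ms on every attempt; INCREMENTALINTERVALS 1000/2000/3000 ms for retryIndex 1/2/3; EXPONENTIALBACKOFF 2000/4000/8000 ms. A self-contained sketch of the same formulas (waitMillis is a local helper name, not part of the class):

    static long waitMillis(RetryType type, int retryIndex, long durationMs) {
        switch (type) {
            case INCREMENTALINTERVALS:
                return durationMs * retryIndex; // grows linearly with each attempt
            case EXPONENTIALBACKOFF:
                return (long) Math.pow(2, retryIndex) * durationMs; // doubles each attempt
            default:
                return durationMs; // REGULARINTERVALS: fixed wait
        }
    }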

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp.util; - -/** - * @author Ritu Parathody - */ -public enum AMQPConfigurations { - - // queue exchange settings - PARAM_EXCHANGE_TYPE("exchangeType"), - PARAM_ROUTING_KEY("routingKey"), - PARAM_DELIVERY_MODE("deliveryMode"), - PARAM_DURABLE("durable"), - PARAM_EXCLUSIVE("exclusive"), - PARAM_AUTO_DELETE("autoDelete"), - PARAM_MAX_PRIORITY("maxPriority"); - - String propertyName; - - AMQPConfigurations(String propertyName) { - this.propertyName = propertyName; - } - - @Override - public String toString() { - return propertyName; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConstants.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConstants.java deleted file mode 100644 index ab50f542cc..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPConstants.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp.util; - -/** - * @author Ritu Parathody - */ -public class AMQPConstants { - - /** When set, this creates a RabbitMQ queue. */ - public static String AMQP_QUEUE_TYPE = "amqp_queue"; - /** When set, this creates a RabbitMQ exchange. */ - public static String AMQP_EXCHANGE_TYPE = "amqp_exchange"; - - public static String PROPERTY_KEY_TEMPLATE = "conductor.event-queues.amqp.%s"; - - /** Default content type for messages read from RabbitMQ. */ - public static String DEFAULT_CONTENT_TYPE = "application/json"; - - /** Default encoding for messages read from RabbitMQ. */ - public static String DEFAULT_CONTENT_ENCODING = "UTF-8"; - - /** Default RabbitMQ exchange type. */ - public static String DEFAULT_EXCHANGE_TYPE = "topic"; - - /** - * Default RabbitMQ durability. When set to true, the queues are persisted to disk. - * - *

{@see RabbitMQ}. - */ - public static boolean DEFAULT_DURABLE = true; - - /** - * Default RabbitMQ exclusivity. When set to true, the queues can only be used by one connection. - * - *

{@see RabbitMQ}. - */ - public static boolean DEFAULT_EXCLUSIVE = false; - - /** - * Default RabbitMQ auto delete. When set to true, the queues will be deleted when the last - * consumer is cancelled. - * - *

{@see RabbitMQ}. - */ - public static boolean DEFAULT_AUTO_DELETE = false; - /** - * Default batch size. Used as the consumer prefetch count (via basicQos), i.e. the maximum - * number of unacknowledged messages delivered at once {@see Consumer Prefetch}. - */ - public static int DEFAULT_BATCH_SIZE = 1; - /** - * Default poll time in milliseconds. A property of this AMQP implementation that sets the - * polling interval used to drain the in-memory queue. - */ - public static int DEFAULT_POLL_TIME_MS = 100; - - // Info messages for channel pool operations. - public static final String INFO_CHANNEL_BORROW_SUCCESS = - "Borrowed the channel object from the channel pool for " + "the connection type [%s]"; - public static final String INFO_CHANNEL_RETURN_SUCCESS = - "Returned the borrowed channel object to the pool for " + "the connection type [%s]"; - public static final String INFO_CHANNEL_CREATION_SUCCESS = - "Channels are not available in the pool. Created a" - + " channel for the connection type [%s]"; - public static final String INFO_CHANNEL_RESET_SUCCESS = - "No proper channels available in the pool. Created a " - + "channel for the connection type [%s]"; -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPSettings.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPSettings.java deleted file mode 100644 index 003bbfa60e..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/AMQPSettings.java +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp.util; - -import java.util.Arrays; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties; - -import static com.netflix.conductor.contribs.queue.amqp.util.AMQPConfigurations.PARAM_AUTO_DELETE; -import static com.netflix.conductor.contribs.queue.amqp.util.AMQPConfigurations.PARAM_DELIVERY_MODE; -import static com.netflix.conductor.contribs.queue.amqp.util.AMQPConfigurations.PARAM_DURABLE; -import static com.netflix.conductor.contribs.queue.amqp.util.AMQPConfigurations.PARAM_EXCHANGE_TYPE; -import static com.netflix.conductor.contribs.queue.amqp.util.AMQPConfigurations.PARAM_EXCLUSIVE; -import static com.netflix.conductor.contribs.queue.amqp.util.AMQPConfigurations.PARAM_MAX_PRIORITY; -import static com.netflix.conductor.contribs.queue.amqp.util.AMQPConfigurations.PARAM_ROUTING_KEY; - -/** - * @author Ritu Parathody - */ -public class AMQPSettings { - - private static final Pattern URI_PATTERN = - Pattern.compile( - "^(?:amqp\\_(queue|exchange))?\\:?(?<name>[^\\?]+)\\??(?<params>.*)$", - Pattern.CASE_INSENSITIVE); - - private String queueOrExchangeName; - private String eventName; - private String exchangeType; - private String queueType; - private String routingKey; - private final String contentEncoding; - private final String contentType; - private boolean durable; - private boolean exclusive; - private boolean autoDelete; - private boolean sequentialProcessing; - private int deliveryMode; - - private final Map<String, Object> arguments = new HashMap<>(); - private static final Logger LOGGER = LoggerFactory.getLogger(AMQPSettings.class); - - public AMQPSettings(final AMQPEventQueueProperties properties) { - // Initialize with default values - durable = properties.isDurable(); - exclusive = properties.isExclusive(); - autoDelete = properties.isAutoDelete(); - contentType = properties.getContentType(); - contentEncoding = properties.getContentEncoding(); - exchangeType = properties.getExchangeType(); - routingKey = StringUtils.EMPTY; - queueType = properties.getQueueType(); - sequentialProcessing = properties.isSequentialMsgProcessing(); - // Set common settings for publishing and consuming - setDeliveryMode(properties.getDeliveryMode()); - } - - public final boolean isDurable() { - return durable; - } - - public final boolean isExclusive() { - return exclusive; - } - - public final boolean autoDelete() { - return autoDelete; - } - - public final Map<String, Object> getArguments() { - return arguments; - } - - public final String getContentEncoding() { - return contentEncoding; - } - - /** - * Use queue for publishing - * - * @param queueName the name of the queue - */ - public void setQueue(String queueName) { - if (StringUtils.isEmpty(queueName)) { - throw new IllegalArgumentException("Queue name for publishing is undefined"); - } - this.queueOrExchangeName = queueName; - } - - public String getQueueOrExchangeName() { - return queueOrExchangeName; - } - - public String getExchangeType() { - 
return exchangeType; - } - - public String getRoutingKey() { - return routingKey; - } - - public int getDeliveryMode() { - return deliveryMode; - } - - /** - * @param deliveryMode 1 for non-persistent messages, 2 for persistent messages - */ - public AMQPSettings setDeliveryMode(int deliveryMode) { - if (deliveryMode != 1 && deliveryMode != 2) { - throw new IllegalArgumentException("Delivery mode must be 1 or 2"); - } - this.deliveryMode = deliveryMode; - return this; - } - - public String getContentType() { - return contentType; - } - - /** - * Completes the settings from the queue URI. - * - *

Example for queue: - * - *     amqp_queue:myQueue?deliveryMode=1&autoDelete=true&exclusive=true - * - * Example for exchange: - * - *     amqp_exchange:myExchange?exchangeType=topic&routingKey=myRoutingKey&exclusive=true - *
- * - * @param queueURI - * @return - */ - public final AMQPSettings fromURI(final String queueURI) { - final Matcher matcher = URI_PATTERN.matcher(queueURI); - if (!matcher.matches()) { - throw new IllegalArgumentException("Queue URI doesn't matches the expected regexp"); - } - - // Set name of queue or exchange from group "name" - LOGGER.info("Queue URI:{}", queueURI); - queueOrExchangeName = matcher.group("name"); - eventName = queueURI; - if (matcher.groupCount() > 1) { - final String queryParams = matcher.group("params"); - if (StringUtils.isNotEmpty(queryParams)) { - // Handle parameters - Arrays.stream(queryParams.split("\\s*\\&\\s*")) - .forEach( - param -> { - final String[] kv = param.split("\\s*=\\s*"); - if (kv.length == 2) { - if (kv[0].equalsIgnoreCase( - String.valueOf(PARAM_EXCHANGE_TYPE))) { - String value = kv[1]; - if (StringUtils.isEmpty(value)) { - throw new IllegalArgumentException( - "The provided exchange type is empty"); - } - exchangeType = value; - } - if (kv[0].equalsIgnoreCase( - (String.valueOf(PARAM_ROUTING_KEY)))) { - String value = kv[1]; - if (StringUtils.isEmpty(value)) { - throw new IllegalArgumentException( - "The provided routing key is empty"); - } - routingKey = value; - } - if (kv[0].equalsIgnoreCase( - (String.valueOf(PARAM_DURABLE)))) { - durable = Boolean.parseBoolean(kv[1]); - } - if (kv[0].equalsIgnoreCase( - (String.valueOf(PARAM_EXCLUSIVE)))) { - exclusive = Boolean.parseBoolean(kv[1]); - } - if (kv[0].equalsIgnoreCase( - (String.valueOf(PARAM_AUTO_DELETE)))) { - autoDelete = Boolean.parseBoolean(kv[1]); - } - if (kv[0].equalsIgnoreCase( - (String.valueOf(PARAM_DELIVERY_MODE)))) { - setDeliveryMode(Integer.parseInt(kv[1])); - } - if (kv[0].equalsIgnoreCase( - (String.valueOf(PARAM_MAX_PRIORITY)))) { - arguments.put("x-max-priority", Integer.valueOf(kv[1])); - } - } - }); - } - } - return this; - } - - @Override - public boolean equals(Object obj) { - if (this == obj) return true; - if (!(obj instanceof AMQPSettings)) return false; - AMQPSettings other = (AMQPSettings) obj; - return Objects.equals(arguments, other.arguments) - && autoDelete == other.autoDelete - && Objects.equals(contentEncoding, other.contentEncoding) - && Objects.equals(contentType, other.contentType) - && deliveryMode == other.deliveryMode - && durable == other.durable - && Objects.equals(eventName, other.eventName) - && Objects.equals(exchangeType, other.exchangeType) - && exclusive == other.exclusive - && Objects.equals(queueOrExchangeName, other.queueOrExchangeName) - && Objects.equals(queueType, other.queueType) - && Objects.equals(routingKey, other.routingKey) - && sequentialProcessing == other.sequentialProcessing; - } - - @Override - public int hashCode() { - return Objects.hash( - arguments, - autoDelete, - contentEncoding, - contentType, - deliveryMode, - durable, - eventName, - exchangeType, - exclusive, - queueOrExchangeName, - queueType, - routingKey, - sequentialProcessing); - } - - @Override - public String toString() { - return "AMQPSettings [queueOrExchangeName=" - + queueOrExchangeName - + ", eventName=" - + eventName - + ", exchangeType=" - + exchangeType - + ", queueType=" - + queueType - + ", routingKey=" - + routingKey - + ", contentEncoding=" - + contentEncoding - + ", contentType=" - + contentType - + ", durable=" - + durable - + ", exclusive=" - + exclusive - + ", autoDelete=" - + autoDelete - + ", sequentialProcessing=" - + sequentialProcessing - + ", deliveryMode=" - + deliveryMode - + ", arguments=" - + arguments - + "]"; - } - - public String 
getEventName() { - return eventName; - } - - /** - * @return the queueType - */ - public String getQueueType() { - return queueType; - } - - /** - * @return the sequentialProcessing - */ - public boolean isSequentialProcessing() { - return sequentialProcessing; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/ConnectionType.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/ConnectionType.java deleted file mode 100644 index d1f06ff990..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/ConnectionType.java +++ /dev/null @@ -1,18 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
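A short, hedged illustration of fromURI above; the URI uses the amqp_exchange prefix (with an underscore) that URI_PATTERN matches, and the parameter values are illustrative:

    AMQPSettings settings =
            new AMQPSettings(new AMQPEventQueueProperties())
                    .fromURI("amqp_exchange:myExchange?exchangeType=topic&routingKey=test&durable=false");
    // Query parameters override the property-derived defaults:
    assert "myExchange".equals(settings.getQueueOrExchangeName());
    assert "topic".equals(settings.getExchangeType());
    assert "test".equals(settings.getRoutingKey());
    assert !settings.isDurable();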

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp.util; - -public enum ConnectionType { - PUBLISHER, - SUBSCRIBER -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/RetryType.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/RetryType.java deleted file mode 100644 index a8b0725766..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/amqp/util/RetryType.java +++ /dev/null @@ -1,20 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp.util; - -/** RetryType holds the retry type */ -public enum RetryType { - REGULARINTERVALS, - EXPONENTIALBACKOFF, - INCREMENTALINTERVALS -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSAbstractQueue.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSAbstractQueue.java deleted file mode 100644 index 34c14f9446..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSAbstractQueue.java +++ /dev/null @@ -1,301 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
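RetryType above only names the republish wait strategies; the arithmetic that interprets each constant is not part of this hunk. The sketch below is purely illustrative, with invented base/attempt parameters, of what the three strategies imply:

    // Illustrative only: a hypothetical translation of RetryType into a wait time.
    static long delayMillis(RetryType type, long baseMillis, int attempt) {
        switch (type) {
            case REGULARINTERVALS:     return baseMillis;                   // constant gap
            case INCREMENTALINTERVALS: return baseMillis * attempt;         // gap grows linearly
            case EXPONENTIALBACKOFF:   return baseMillis * (1L << attempt); // gap doubles
            default: throw new IllegalArgumentException("Unknown retry type: " + type);
        }
    }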

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.nats; - -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.Executors; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.core.events.queue.Message; -import com.netflix.conductor.core.events.queue.ObservableQueue; - -import io.nats.client.NUID; -import rx.Observable; -import rx.Scheduler; - -/** - * @author Oleksiy Lysak - */ -public abstract class NATSAbstractQueue implements ObservableQueue { - - private static final Logger LOGGER = LoggerFactory.getLogger(NATSAbstractQueue.class); - protected LinkedBlockingQueue<Message> messages = new LinkedBlockingQueue<>(); - protected final Lock mu = new ReentrantLock(); - private final String queueType; - private ScheduledExecutorService execs; - private final Scheduler scheduler; - - protected final String queueURI; - protected final String subject; - protected String queue; - - // Indicates that observe was called (Event Handler) and we must re-initiate subscription - // upon reconnection - private boolean observable; - private boolean isOpened; - private volatile boolean running; - - NATSAbstractQueue(String queueURI, String queueType, Scheduler scheduler) { - this.queueURI = queueURI; - this.queueType = queueType; - this.scheduler = scheduler; - - // If queue specified (e.g.
subject:queue) - split to subject & queue - if (queueURI.contains(":")) { - this.subject = queueURI.substring(0, queueURI.indexOf(':')); - queue = queueURI.substring(queueURI.indexOf(':') + 1); - } else { - this.subject = queueURI; - queue = null; - } - LOGGER.info( - String.format( - "Initialized with queueURI=%s, subject=%s, queue=%s", - queueURI, subject, queue)); - } - - void onMessage(String subject, byte[] data) { - String payload = new String(data); - LOGGER.info(String.format("Received message for %s: %s", subject, payload)); - - Message dstMsg = new Message(); - dstMsg.setId(NUID.nextGlobal()); - dstMsg.setPayload(payload); - - messages.add(dstMsg); - } - - @Override - public Observable observe() { - LOGGER.info("Observe invoked for queueURI " + queueURI); - observable = true; - - mu.lock(); - try { - subscribe(); - } finally { - mu.unlock(); - } - - Observable.OnSubscribe onSubscribe = - subscriber -> { - Observable interval = - Observable.interval(100, TimeUnit.MILLISECONDS, scheduler); - interval.flatMap( - (Long x) -> { - if (!isRunning()) { - LOGGER.debug( - "Component stopped, skip listening for messages from NATS Queue"); - return Observable.from(Collections.emptyList()); - } else { - List available = new LinkedList<>(); - messages.drainTo(available); - - if (!available.isEmpty()) { - AtomicInteger count = new AtomicInteger(0); - StringBuilder buffer = new StringBuilder(); - available.forEach( - msg -> { - buffer.append(msg.getId()) - .append("=") - .append(msg.getPayload()); - count.incrementAndGet(); - - if (count.get() < available.size()) { - buffer.append(","); - } - }); - LOGGER.info( - String.format( - "Batch from %s to conductor is %s", - subject, buffer.toString())); - } - - return Observable.from(available); - } - }) - .subscribe(subscriber::onNext, subscriber::onError); - }; - return Observable.create(onSubscribe); - } - - @Override - public String getType() { - return queueType; - } - - @Override - public String getName() { - return queueURI; - } - - @Override - public String getURI() { - return queueURI; - } - - @Override - public List ack(List messages) { - return Collections.emptyList(); - } - - @Override - public void setUnackTimeout(Message message, long unackTimeout) {} - - @Override - public long size() { - return messages.size(); - } - - @Override - public void publish(List messages) { - messages.forEach( - message -> { - try { - String payload = message.getPayload(); - publish(subject, payload.getBytes()); - LOGGER.info(String.format("Published message to %s: %s", subject, payload)); - } catch (Exception ex) { - LOGGER.error( - "Failed to publish message " - + message.getPayload() - + " to " - + subject, - ex); - throw new RuntimeException(ex); - } - }); - } - - @Override - public boolean rePublishIfNoAck() { - return true; - } - - @Override - public void close() { - LOGGER.info("Closing connection for " + queueURI); - mu.lock(); - try { - if (execs != null) { - execs.shutdownNow(); - execs = null; - } - closeSubs(); - closeConn(); - isOpened = false; - } finally { - mu.unlock(); - } - } - - public void open() { - // do nothing if not closed - if (isOpened) { - return; - } - - mu.lock(); - try { - try { - connect(); - - // Re-initiated subscription if existed - if (observable) { - subscribe(); - } - } catch (Exception ignore) { - } - - execs = Executors.newScheduledThreadPool(1); - execs.scheduleAtFixedRate(this::monitor, 0, 500, TimeUnit.MILLISECONDS); - isOpened = true; - } finally { - mu.unlock(); - } - } - - private void monitor() { - if 
(isConnected()) { - return; - } - - LOGGER.error("Monitor invoked for " + queueURI); - mu.lock(); - try { - closeSubs(); - closeConn(); - - // Connect - connect(); - - // Re-initiated subscription if existed - if (observable) { - subscribe(); - } - } catch (Exception ex) { - LOGGER.error("Monitor failed with " + ex.getMessage() + " for " + queueURI, ex); - } finally { - mu.unlock(); - } - } - - public boolean isClosed() { - return !isOpened; - } - - void ensureConnected() { - if (!isConnected()) { - throw new RuntimeException("No nats connection"); - } - } - - @Override - public void start() { - LOGGER.info("Started listening to {}:{}", getClass().getSimpleName(), queueURI); - running = true; - } - - @Override - public void stop() { - LOGGER.info("Stopped listening to {}:{}", getClass().getSimpleName(), queueURI); - running = false; - } - - @Override - public boolean isRunning() { - return running; - } - - abstract void connect(); - - abstract boolean isConnected(); - - abstract void publish(String subject, byte[] data) throws Exception; - - abstract void subscribe(); - - abstract void closeSubs(); - - abstract void closeConn(); -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSObservableQueue.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSObservableQueue.java deleted file mode 100644 index 9bcb05b70d..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSObservableQueue.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
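NATSAbstractQueue above is a template class: the 100 ms drain loop in observe(), the 500 ms reconnect monitor, and the open()/close() bookkeeping are shared, and a subclass only supplies the transport through the six package-private abstract hooks. A minimal sketch of that contract (class name and bodies are placeholders; it assumes the same package as the base class):

    public class MyTransportQueue extends NATSAbstractQueue {
        MyTransportQueue(String queueURI, rx.Scheduler scheduler) {
            super(queueURI, "my_transport", scheduler); // queueType tag reported by getType()
        }
        @Override void connect() { /* open the client connection */ }
        @Override boolean isConnected() { return false; /* polled by the monitor */ }
        @Override void publish(String subject, byte[] data) { /* send one message */ }
        @Override void subscribe() { /* route incoming payloads to onMessage(subject, data) */ }
        @Override void closeSubs() { /* tear down the subscription */ }
        @Override void closeConn() { /* tear down the connection */ }
    }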

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.nats; - -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import io.nats.client.Connection; -import io.nats.client.ConnectionFactory; -import io.nats.client.Subscription; -import rx.Scheduler; - -/** - * @author Oleksiy Lysak - */ -public class NATSObservableQueue extends NATSAbstractQueue { - - private static final Logger LOGGER = LoggerFactory.getLogger(NATSObservableQueue.class); - private final ConnectionFactory fact; - private Subscription subs; - private Connection conn; - - public NATSObservableQueue(ConnectionFactory factory, String queueURI, Scheduler scheduler) { - super(queueURI, "nats", scheduler); - this.fact = factory; - open(); - } - - @Override - public boolean isConnected() { - return (conn != null && conn.isConnected()); - } - - @Override - public void connect() { - try { - Connection temp = fact.createConnection(); - LOGGER.info("Successfully connected for " + queueURI); - temp.setReconnectedCallback( - (event) -> LOGGER.warn("onReconnect. Reconnected back for " + queueURI)); - temp.setDisconnectedCallback( - (event -> LOGGER.warn("onDisconnect. Disconnected for " + queueURI))); - conn = temp; - } catch (Exception e) { - LOGGER.error("Unable to establish nats connection for " + queueURI, e); - throw new RuntimeException(e); - } - } - - @Override - public void subscribe() { - // do nothing if already subscribed - if (subs != null) { - return; - } - - try { - ensureConnected(); - // Create subject/queue subscription if the queue has been provided - if (StringUtils.isNotEmpty(queue)) { - LOGGER.info( - "No subscription. Creating a queue subscription. subject={}, queue={}", - subject, - queue); - subs = - conn.subscribe( - subject, queue, msg -> onMessage(msg.getSubject(), msg.getData())); - } else { - LOGGER.info( - "No subscription. Creating a pub/sub subscription. subject={}", subject); - subs = conn.subscribe(subject, msg -> onMessage(msg.getSubject(), msg.getData())); - } - } catch (Exception ex) { - LOGGER.error( - "Subscription failed with " + ex.getMessage() + " for queueURI " + queueURI, - ex); - } - } - - @Override - public void publish(String subject, byte[] data) throws Exception { - ensureConnected(); - conn.publish(subject, data); - } - - @Override - public void closeSubs() { - if (subs != null) { - try { - subs.close(); - } catch (Exception ex) { - LOGGER.error("closeSubs failed with " + ex.getMessage() + " for " + queueURI, ex); - } - subs = null; - } - } - - @Override - public void closeConn() { - if (conn != null) { - try { - conn.close(); - } catch (Exception ex) { - LOGGER.error("closeConn failed with " + ex.getMessage() + " for " + queueURI, ex); - } - conn = null; - } - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSStreamObservableQueue.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSStreamObservableQueue.java deleted file mode 100644 index f74ebb4cd8..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/NATSStreamObservableQueue.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
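The queue above connects eagerly (open() runs in the constructor) and resubscribes automatically after a reconnect. A usage sketch, assuming the Properties-based ConnectionFactory constructor that NATSEventQueueProvider below also uses; subject and queue group are placeholders:

    // With empty Properties the old jnats client falls back to nats://localhost:4222.
    io.nats.client.ConnectionFactory factory =
            new io.nats.client.ConnectionFactory(new java.util.Properties());
    NATSObservableQueue queue = new NATSObservableQueue(
            factory, "conductor.events:workers", rx.schedulers.Schedulers.io());
    queue.start(); // flips the running flag so the drain loop emits messages
    queue.observe().subscribe(msg -> System.out.println(msg.getPayload()));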

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.nats; - -import java.util.UUID; - -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import io.nats.streaming.StreamingConnection; -import io.nats.streaming.StreamingConnectionFactory; -import io.nats.streaming.Subscription; -import io.nats.streaming.SubscriptionOptions; -import rx.Scheduler; - -/** - * @author Oleksiy Lysak - */ -public class NATSStreamObservableQueue extends NATSAbstractQueue { - - private static final Logger LOGGER = LoggerFactory.getLogger(NATSStreamObservableQueue.class); - private final StreamingConnectionFactory fact; - private StreamingConnection conn; - private Subscription subs; - private final String durableName; - - public NATSStreamObservableQueue( - String clusterId, - String natsUrl, - String durableName, - String queueURI, - Scheduler scheduler) { - super(queueURI, "nats_stream", scheduler); - this.fact = new StreamingConnectionFactory(); - this.fact.setClusterId(clusterId); - this.fact.setClientId(UUID.randomUUID().toString()); - this.fact.setNatsUrl(natsUrl); - this.durableName = durableName; - open(); - } - - @Override - public boolean isConnected() { - return (conn != null - && conn.getNatsConnection() != null - && conn.getNatsConnection().isConnected()); - } - - @Override - public void connect() { - try { - StreamingConnection temp = fact.createConnection(); - LOGGER.info("Successfully connected for " + queueURI); - temp.getNatsConnection() - .setReconnectedCallback( - (event) -> - LOGGER.warn("onReconnect. Reconnected back for " + queueURI)); - temp.getNatsConnection() - .setDisconnectedCallback( - (event -> LOGGER.warn("onDisconnect. Disconnected for " + queueURI))); - conn = temp; - } catch (Exception e) { - LOGGER.error("Unable to establish nats streaming connection for " + queueURI, e); - throw new RuntimeException(e); - } - } - - @Override - public void subscribe() { - // do nothing if already subscribed - if (subs != null) { - return; - } - - try { - ensureConnected(); - SubscriptionOptions subscriptionOptions = - new SubscriptionOptions.Builder().durableName(durableName).build(); - // Create subject/queue subscription if the queue has been provided - if (StringUtils.isNotEmpty(queue)) { - LOGGER.info( - "No subscription. Creating a queue subscription. subject={}, queue={}", - subject, - queue); - subs = - conn.subscribe( - subject, - queue, - msg -> onMessage(msg.getSubject(), msg.getData()), - subscriptionOptions); - } else { - LOGGER.info( - "No subscription. Creating a pub/sub subscription. 
subject={}", subject); - subs = - conn.subscribe( - subject, - msg -> onMessage(msg.getSubject(), msg.getData()), - subscriptionOptions); - } - } catch (Exception ex) { - LOGGER.error( - "Subscription failed with " + ex.getMessage() + " for queueURI " + queueURI, - ex); - } - } - - @Override - public void publish(String subject, byte[] data) throws Exception { - ensureConnected(); - conn.publish(subject, data); - } - - @Override - public void closeSubs() { - if (subs != null) { - try { - subs.close(true); - } catch (Exception ex) { - LOGGER.error("closeSubs failed with " + ex.getMessage() + " for " + queueURI, ex); - } - subs = null; - } - } - - @Override - public void closeConn() { - if (conn != null) { - try { - conn.close(); - } catch (Exception ex) { - LOGGER.error("closeConn failed with " + ex.getMessage() + " for " + queueURI, ex); - } - conn = null; - } - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSConfiguration.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSConfiguration.java deleted file mode 100644 index 19eb05f4f4..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSConfiguration.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
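Relative to the plain NATS queue, the streaming variant above adds a cluster id and a durable subscriber name, so a restarted client resumes where it left off. Construction sketch with the parameter order shown above; every value is a placeholder:

    NATSStreamObservableQueue queue = new NATSStreamObservableQueue(
            "test-cluster",             // clusterId (the NATSStreamProperties default)
            "nats://localhost:4222",    // natsUrl
            "conductor-durable",        // durableName kept across reconnects
            "conductor.events:workers", // queueURI, split into subject and queue group
            rx.schedulers.Schedulers.io());
    queue.start();
    queue.observe().subscribe(msg -> System.out.println(msg.getPayload()));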

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.nats.config; - -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.core.env.Environment; - -import com.netflix.conductor.core.events.EventQueueProvider; - -import rx.Scheduler; - -@Configuration -@ConditionalOnProperty(name = "conductor.event-queues.nats.enabled", havingValue = "true") -public class NATSConfiguration { - - @Bean - public EventQueueProvider natsEventQueueProvider(Environment environment, Scheduler scheduler) { - return new NATSEventQueueProvider(environment, scheduler); - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSEventQueueProvider.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSEventQueueProvider.java deleted file mode 100644 index a23252d571..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSEventQueueProvider.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.nats.config; - -import java.util.Map; -import java.util.Properties; -import java.util.concurrent.ConcurrentHashMap; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.core.env.Environment; -import org.springframework.lang.NonNull; - -import com.netflix.conductor.contribs.queue.nats.NATSObservableQueue; -import com.netflix.conductor.core.events.EventQueueProvider; -import com.netflix.conductor.core.events.queue.ObservableQueue; - -import io.nats.client.ConnectionFactory; -import rx.Scheduler; - -/** - * @author Oleksiy Lysak - */ -public class NATSEventQueueProvider implements EventQueueProvider { - - private static final Logger LOGGER = LoggerFactory.getLogger(NATSEventQueueProvider.class); - - protected Map queues = new ConcurrentHashMap<>(); - private final ConnectionFactory factory; - private final Scheduler scheduler; - - public NATSEventQueueProvider(Environment environment, Scheduler scheduler) { - this.scheduler = scheduler; - LOGGER.info("NATS Event Queue Provider init"); - - // Init NATS API. Handle "io_nats" and "io.nats" ways to specify parameters - Properties props = new Properties(); - Properties temp = new Properties(); - temp.putAll(System.getenv()); - temp.putAll(System.getProperties()); - temp.forEach( - (k, v) -> { - String key = k.toString(); - String val = v.toString(); - - if (key.startsWith("io_nats")) { - key = key.replace("_", "."); - } - props.put(key, environment.getProperty(key, val)); - }); - - // Init NATS API - factory = new ConnectionFactory(props); - LOGGER.info("NATS Event Queue Provider initialized..."); - } - - @Override - public String getQueueType() { - return "nats"; - } - - @Override - @NonNull - public ObservableQueue getQueue(String queueURI) { - NATSObservableQueue queue = - queues.computeIfAbsent( - queueURI, q -> new NATSObservableQueue(factory, queueURI, scheduler)); - if (queue.isClosed()) { - queue.open(); - } - return queue; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamConfiguration.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamConfiguration.java deleted file mode 100644 index 3721e460ff..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamConfiguration.java +++ /dev/null @@ -1,75 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
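The provider above snapshots io_nats/io.nats settings from system properties and the environment once, then hands out one queue per URI from a ConcurrentHashMap, reopening any that were closed. Usage sketch; 'environment' and 'scheduler' are assumed to be the Spring beans wired in by NATSConfiguration above:

    NATSEventQueueProvider provider = new NATSEventQueueProvider(environment, scheduler);
    String type = provider.getQueueType(); // "nats", the key this provider registers under
    // Repeated calls with the same URI return the same (re)opened queue instance.
    ObservableQueue queue = provider.getQueue("conductor.events:workers");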

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.nats.config; - -import java.util.HashMap; -import java.util.Map; - -import org.apache.commons.lang3.StringUtils; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import com.netflix.conductor.common.metadata.tasks.Task; -import com.netflix.conductor.contribs.queue.nats.NATSStreamObservableQueue; -import com.netflix.conductor.core.config.ConductorProperties; -import com.netflix.conductor.core.events.EventQueueProvider; -import com.netflix.conductor.core.events.queue.ObservableQueue; - -import rx.Scheduler; - -@Configuration -@EnableConfigurationProperties(NATSStreamProperties.class) -@ConditionalOnProperty(name = "conductor.event-queues.nats-stream.enabled", havingValue = "true") -public class NATSStreamConfiguration { - - @Bean - public EventQueueProvider natsEventQueueProvider( - NATSStreamProperties properties, Scheduler scheduler) { - return new NATSStreamEventQueueProvider(properties, scheduler); - } - - @ConditionalOnProperty(name = "conductor.default-event-queue.type", havingValue = "nats_stream") - @Bean - public Map getQueues( - ConductorProperties conductorProperties, - NATSStreamProperties properties, - Scheduler scheduler) { - String stack = ""; - if (conductorProperties.getStack() != null && conductorProperties.getStack().length() > 0) { - stack = conductorProperties.getStack() + "_"; - } - Task.Status[] statuses = new Task.Status[] {Task.Status.COMPLETED, Task.Status.FAILED}; - Map queues = new HashMap<>(); - for (Task.Status status : statuses) { - String queuePrefix = - StringUtils.isBlank(properties.getListenerQueuePrefix()) - ? conductorProperties.getAppId() + "_nats_stream_notify_" + stack - : properties.getListenerQueuePrefix(); - - String queueName = queuePrefix + status.name(); - - ObservableQueue queue = - new NATSStreamObservableQueue( - properties.getClusterId(), - properties.getUrl(), - properties.getDurableName(), - queueName, - scheduler); - queues.put(status, queue); - } - - return queues; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamEventQueueProvider.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamEventQueueProvider.java deleted file mode 100644 index 74738b97b0..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamEventQueueProvider.java +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
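The getQueues bean above derives one notification queue per terminal task status. Restating its naming rule with sample values (an app id of "conductor", no stack, and no listener-queue prefix override; all three are examples):

    String stack = "";                                             // empty: no stack configured
    String queuePrefix = "conductor" + "_nats_stream_notify_" + stack;
    for (Task.Status status : new Task.Status[] {Task.Status.COMPLETED, Task.Status.FAILED}) {
        String queueName = queuePrefix + status.name();
        System.out.println(queueName); // conductor_nats_stream_notify_COMPLETED, ..._FAILED
    }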

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.nats.config; - -import java.util.Map; -import java.util.concurrent.ConcurrentHashMap; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.lang.NonNull; - -import com.netflix.conductor.contribs.queue.nats.NATSStreamObservableQueue; -import com.netflix.conductor.core.events.EventQueueProvider; -import com.netflix.conductor.core.events.queue.ObservableQueue; - -import rx.Scheduler; - -/** - * @author Oleksiy Lysak - */ -public class NATSStreamEventQueueProvider implements EventQueueProvider { - - private static final Logger LOGGER = - LoggerFactory.getLogger(NATSStreamEventQueueProvider.class); - protected final Map queues = new ConcurrentHashMap<>(); - private final String durableName; - private final String clusterId; - private final String natsUrl; - private final Scheduler scheduler; - - public NATSStreamEventQueueProvider(NATSStreamProperties properties, Scheduler scheduler) { - LOGGER.info("NATS Stream Event Queue Provider init"); - this.scheduler = scheduler; - - // Get NATS Streaming options - clusterId = properties.getClusterId(); - durableName = properties.getDurableName(); - natsUrl = properties.getUrl(); - - LOGGER.info( - "NATS Streaming clusterId=" - + clusterId - + ", natsUrl=" - + natsUrl - + ", durableName=" - + durableName); - LOGGER.info("NATS Stream Event Queue Provider initialized..."); - } - - @Override - public String getQueueType() { - return "nats_stream"; - } - - @Override - @NonNull - public ObservableQueue getQueue(String queueURI) { - NATSStreamObservableQueue queue = - queues.computeIfAbsent( - queueURI, - q -> - new NATSStreamObservableQueue( - clusterId, natsUrl, durableName, queueURI, scheduler)); - if (queue.isClosed()) { - queue.open(); - } - return queue; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamProperties.java b/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamProperties.java deleted file mode 100644 index c949963df0..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/queue/nats/config/NATSStreamProperties.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.nats.config; - -import org.springframework.boot.context.properties.ConfigurationProperties; - -import io.nats.client.Nats; - -@ConfigurationProperties("conductor.event-queues.nats-stream") -public class NATSStreamProperties { - - /** The cluster id of the STAN session */ - private String clusterId = "test-cluster"; - - /** The durable subscriber name for the subscription */ - private String durableName = null; - - /** The NATS connection url */ - private String url = Nats.DEFAULT_URL; - - /** The prefix to be used for the default listener queues */ - private String listenerQueuePrefix = ""; - - public String getClusterId() { - return clusterId; - } - - public void setClusterId(String clusterId) { - this.clusterId = clusterId; - } - - public String getDurableName() { - return durableName; - } - - public void setDurableName(String durableName) { - this.durableName = durableName; - } - - public String getUrl() { - return url; - } - - public void setUrl(String url) { - this.url = url; - } - - public String getListenerQueuePrefix() { - return listenerQueuePrefix; - } - - public void setListenerQueuePrefix(String listenerQueuePrefix) { - this.listenerQueuePrefix = listenerQueuePrefix; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManager.java b/contribs/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManager.java deleted file mode 100644 index 4095af478e..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManager.java +++ /dev/null @@ -1,111 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
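NATSStreamProperties above binds under the conductor.event-queues.nats-stream prefix, so each setter has a matching configuration key (shown in comments, using Spring's usual relaxed binding). Programmatic equivalent with sample values:

    NATSStreamProperties props = new NATSStreamProperties();
    props.setClusterId("events-cluster");       // conductor.event-queues.nats-stream.cluster-id
    props.setDurableName("conductor-durable");  // conductor.event-queues.nats-stream.durable-name
    props.setUrl("nats://localhost:4222");      // ...url (defaults to Nats.DEFAULT_URL)
    props.setListenerQueuePrefix("notify_");    // ...listener-queue-prefix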

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.tasks.kafka; - -import java.time.Duration; -import java.util.Objects; -import java.util.Properties; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; - -import org.apache.kafka.clients.producer.KafkaProducer; -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.annotation.Value; -import org.springframework.stereotype.Component; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; - -@SuppressWarnings("rawtypes") -@Component -public class KafkaProducerManager { - - private static final Logger LOGGER = LoggerFactory.getLogger(KafkaProducerManager.class); - - private final String requestTimeoutConfig; - private final Cache kafkaProducerCache; - private final String maxBlockMsConfig; - - private static final String STRING_SERIALIZER = - "org.apache.kafka.common.serialization.StringSerializer"; - private static final RemovalListener LISTENER = - notification -> { - if (notification.getValue() != null) { - notification.getValue().close(); - LOGGER.info("Closed producer for {}", notification.getKey()); - } - }; - - @Autowired - public KafkaProducerManager( - @Value("${conductor.tasks.kafka-publish.requestTimeout:100ms}") Duration requestTimeout, - @Value("${conductor.tasks.kafka-publish.maxBlock:500ms}") Duration maxBlock, - @Value("${conductor.tasks.kafka-publish.cacheSize:10}") int cacheSize, - @Value("${conductor.tasks.kafka-publish.cacheTime:120000ms}") Duration cacheTime) { - this.requestTimeoutConfig = String.valueOf(requestTimeout.toMillis()); - this.maxBlockMsConfig = String.valueOf(maxBlock.toMillis()); - this.kafkaProducerCache = - CacheBuilder.newBuilder() - .removalListener(LISTENER) - .maximumSize(cacheSize) - .expireAfterAccess(cacheTime.toMillis(), TimeUnit.MILLISECONDS) - .build(); - } - - public Producer getProducer(KafkaPublishTask.Input input) { - Properties configProperties = getProducerProperties(input); - return getFromCache(configProperties, () -> new KafkaProducer(configProperties)); - } - - @VisibleForTesting - Producer getFromCache(Properties configProperties, Callable createProducerCallable) { - try { - return kafkaProducerCache.get(configProperties, createProducerCallable); - } catch (ExecutionException e) { - throw new RuntimeException(e); - } - } - - @VisibleForTesting - Properties getProducerProperties(KafkaPublishTask.Input input) { - - Properties configProperties = new Properties(); - configProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, input.getBootStrapServers()); - - configProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, input.getKeySerializer()); - - String requestTimeoutMs = requestTimeoutConfig; - - if (Objects.nonNull(input.getRequestTimeoutMs())) { - requestTimeoutMs = String.valueOf(input.getRequestTimeoutMs()); - } - - String maxBlockMs = 
maxBlockMsConfig; - - if (Objects.nonNull(input.getMaxBlockMs())) { - maxBlockMs = String.valueOf(input.getMaxBlockMs()); - } - - configProperties.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeoutMs); - configProperties.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, maxBlockMs); - configProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, STRING_SERIALIZER); - return configProperties; - } -} diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTask.java b/contribs/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTask.java deleted file mode 100644 index 8ec91a0396..0000000000 --- a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTask.java +++ /dev/null @@ -1,313 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
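KafkaProducerManager above keys its Guava cache on the fully resolved Properties, so tasks that share bootstrap servers, serializer, and timeouts reuse one producer, and the removal listener closes producers as they age out. Construction sketch using the same defaults as the @Value annotations above:

    KafkaProducerManager manager = new KafkaProducerManager(
            java.time.Duration.ofMillis(100),      // request.timeout.ms handed to the producer
            java.time.Duration.ofMillis(500),      // max.block.ms
            10,                                    // at most 10 cached producers
            java.time.Duration.ofMillis(120_000)); // idle time before eviction (and close)
    Producer producer = manager.getProducer(input); // 'input' is a KafkaPublishTask.Input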

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.tasks.kafka; - -import java.time.Instant; -import java.util.HashMap; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.stream.Collectors; - -import org.apache.commons.lang3.StringUtils; -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.ProducerRecord; -import org.apache.kafka.clients.producer.RecordMetadata; -import org.apache.kafka.common.header.Header; -import org.apache.kafka.common.header.internals.RecordHeader; -import org.apache.kafka.common.serialization.IntegerSerializer; -import org.apache.kafka.common.serialization.LongSerializer; -import org.apache.kafka.common.serialization.StringSerializer; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.stereotype.Component; - -import com.netflix.conductor.core.execution.WorkflowExecutor; -import com.netflix.conductor.core.execution.tasks.WorkflowSystemTask; -import com.netflix.conductor.core.utils.Utils; -import com.netflix.conductor.model.TaskModel; -import com.netflix.conductor.model.WorkflowModel; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; - -import static com.netflix.conductor.common.metadata.tasks.TaskType.TASK_TYPE_KAFKA_PUBLISH; - -@Component(TASK_TYPE_KAFKA_PUBLISH) -public class KafkaPublishTask extends WorkflowSystemTask { - - private static final Logger LOGGER = LoggerFactory.getLogger(KafkaPublishTask.class); - - static final String REQUEST_PARAMETER_NAME = "kafka_request"; - private static final String MISSING_REQUEST = - "Missing Kafka request. Task input MUST have a '" - + REQUEST_PARAMETER_NAME - + "' key with KafkaTask.Input as value. See documentation for KafkaTask for required input parameters"; - private static final String MISSING_BOOT_STRAP_SERVERS = "No boot strap servers specified"; - private static final String MISSING_KAFKA_TOPIC = - "Missing Kafka topic. See documentation for KafkaTask for required input parameters"; - private static final String MISSING_KAFKA_VALUE = - "Missing Kafka value. 
See documentation for KafkaTask for required input parameters"; - private static final String FAILED_TO_INVOKE = "Failed to invoke kafka task due to: "; - - private final ObjectMapper objectMapper; - private final String requestParameter; - private final KafkaProducerManager producerManager; - - @Autowired - public KafkaPublishTask(KafkaProducerManager clientManager, ObjectMapper objectMapper) { - super(TASK_TYPE_KAFKA_PUBLISH); - this.requestParameter = REQUEST_PARAMETER_NAME; - this.producerManager = clientManager; - this.objectMapper = objectMapper; - LOGGER.info("KafkaTask initialized."); - } - - @Override - public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) { - - long taskStartMillis = Instant.now().toEpochMilli(); - task.setWorkerId(Utils.getServerId()); - Object request = task.getInputData().get(requestParameter); - - if (Objects.isNull(request)) { - markTaskAsFailed(task, MISSING_REQUEST); - return; - } - - Input input = objectMapper.convertValue(request, Input.class); - - if (StringUtils.isBlank(input.getBootStrapServers())) { - markTaskAsFailed(task, MISSING_BOOT_STRAP_SERVERS); - return; - } - - if (StringUtils.isBlank(input.getTopic())) { - markTaskAsFailed(task, MISSING_KAFKA_TOPIC); - return; - } - - if (Objects.isNull(input.getValue())) { - markTaskAsFailed(task, MISSING_KAFKA_VALUE); - return; - } - - try { - Future recordMetaDataFuture = kafkaPublish(input); - try { - recordMetaDataFuture.get(); - if (isAsyncComplete(task)) { - task.setStatus(TaskModel.Status.IN_PROGRESS); - } else { - task.setStatus(TaskModel.Status.COMPLETED); - } - long timeTakenToCompleteTask = Instant.now().toEpochMilli() - taskStartMillis; - LOGGER.debug("Published message {}, Time taken {}", input, timeTakenToCompleteTask); - - } catch (ExecutionException ec) { - LOGGER.error( - "Failed to invoke kafka task: {} - execution exception ", - task.getTaskId(), - ec); - markTaskAsFailed(task, FAILED_TO_INVOKE + ec.getMessage()); - } - } catch (Exception e) { - LOGGER.error( - "Failed to invoke kafka task:{} for input {} - unknown exception", - task.getTaskId(), - input, - e); - markTaskAsFailed(task, FAILED_TO_INVOKE + e.getMessage()); - } - } - - private void markTaskAsFailed(TaskModel task, String reasonForIncompletion) { - task.setReasonForIncompletion(reasonForIncompletion); - task.setStatus(TaskModel.Status.FAILED); - } - - /** - * @param input Kafka Request - * @return Future for execution. - */ - @SuppressWarnings({"unchecked", "rawtypes"}) - private Future kafkaPublish(Input input) throws Exception { - - long startPublishingEpochMillis = Instant.now().toEpochMilli(); - - Producer producer = producerManager.getProducer(input); - - long timeTakenToCreateProducer = Instant.now().toEpochMilli() - startPublishingEpochMillis; - - LOGGER.debug("Time taken getting producer {}", timeTakenToCreateProducer); - - Object key = getKey(input); - - Iterable
<Header>
headers = - input.getHeaders().entrySet().stream() - .map( - header -> - new RecordHeader( - header.getKey(), - String.valueOf(header.getValue()).getBytes())) - .collect(Collectors.toList()); - ProducerRecord rec = - new ProducerRecord( - input.getTopic(), - null, - null, - key, - objectMapper.writeValueAsString(input.getValue()), - headers); - - Future send = producer.send(rec); - - long timeTakenToPublish = Instant.now().toEpochMilli() - startPublishingEpochMillis; - - LOGGER.debug("Time taken publishing {}", timeTakenToPublish); - - return send; - } - - @VisibleForTesting - Object getKey(Input input) { - String keySerializer = input.getKeySerializer(); - - if (LongSerializer.class.getCanonicalName().equals(keySerializer)) { - return Long.parseLong(String.valueOf(input.getKey())); - } else if (IntegerSerializer.class.getCanonicalName().equals(keySerializer)) { - return Integer.parseInt(String.valueOf(input.getKey())); - } else { - return String.valueOf(input.getKey()); - } - } - - @Override - public boolean execute(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) { - return false; - } - - @Override - public void cancel(WorkflowModel workflow, TaskModel task, WorkflowExecutor executor) { - task.setStatus(TaskModel.Status.CANCELED); - } - - @Override - public boolean isAsync() { - return true; - } - - public static class Input { - - public static final String STRING_SERIALIZER = StringSerializer.class.getCanonicalName(); - private Map headers = new HashMap<>(); - private String bootStrapServers; - private Object key; - private Object value; - private Integer requestTimeoutMs; - private Integer maxBlockMs; - private String topic; - private String keySerializer = STRING_SERIALIZER; - - public Map getHeaders() { - return headers; - } - - public void setHeaders(Map headers) { - this.headers = headers; - } - - public String getBootStrapServers() { - return bootStrapServers; - } - - public void setBootStrapServers(String bootStrapServers) { - this.bootStrapServers = bootStrapServers; - } - - public Object getKey() { - return key; - } - - public void setKey(Object key) { - this.key = key; - } - - public Object getValue() { - return value; - } - - public void setValue(Object value) { - this.value = value; - } - - public Integer getRequestTimeoutMs() { - return requestTimeoutMs; - } - - public void setRequestTimeoutMs(Integer requestTimeoutMs) { - this.requestTimeoutMs = requestTimeoutMs; - } - - public String getTopic() { - return topic; - } - - public void setTopic(String topic) { - this.topic = topic; - } - - public String getKeySerializer() { - return keySerializer; - } - - public void setKeySerializer(String keySerializer) { - this.keySerializer = keySerializer; - } - - public Integer getMaxBlockMs() { - return maxBlockMs; - } - - public void setMaxBlockMs(Integer maxBlockMs) { - this.maxBlockMs = maxBlockMs; - } - - @Override - public String toString() { - return "Input{" - + "headers=" - + headers - + ", bootStrapServers='" - + bootStrapServers - + '\'' - + ", key=" - + key - + ", value=" - + value - + ", requestTimeoutMs=" - + requestTimeoutMs - + ", maxBlockMs=" - + maxBlockMs - + ", topic='" - + topic - + '\'' - + ", keySerializer='" - + keySerializer - + '\'' - + '}'; - } - } -} diff --git a/contribs/src/main/resources/META-INF/additional-spring-configuration-metadata.json b/contribs/src/main/resources/META-INF/additional-spring-configuration-metadata.json deleted file mode 100644 index 3156ecbef3..0000000000 --- 
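Pulling the Input contract above together: the task fails fast unless kafka_request provides bootStrapServers, topic, and a value; key, headers, timeouts, and keySerializer are optional. A sketch of a well-formed task input (all values are examples):

    Map<String, Object> kafkaRequest = new HashMap<>();
    kafkaRequest.put("bootStrapServers", "localhost:9092");   // required
    kafkaRequest.put("topic", "demo-topic");                  // required
    kafkaRequest.put("value", Map.of("greeting", "hello"));   // required; written via ObjectMapper
    kafkaRequest.put("key", 42);                              // optional
    kafkaRequest.put("keySerializer",
            "org.apache.kafka.common.serialization.LongSerializer"); // getKey() parses 42 as a Long
    kafkaRequest.put("headers", Map.of("source", "conductor"));
    taskInput.put("kafka_request", kafkaRequest);             // REQUEST_PARAMETER_NAME above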
a/contribs/src/main/resources/META-INF/additional-spring-configuration-metadata.json +++ /dev/null @@ -1,136 +0,0 @@ -{ - "properties": [ - { - "name": "conductor.metrics-logger.reportPeriodSeconds", - "type": "java.lang.Long", - "description": "The interval (in seconds) at which the metrics will be reported into the log stream by the metrics-logger." - }, - { - "name": "conductor.tasks.http.readTimeout", - "type": "java.lang.Integer", - "description": "The read timeout of the underlying HttpClient used by the HTTP task." - }, - { - "name": "conductor.tasks.http.connectTimeout", - "type": "java.lang.Integer", - "description": "The connection timeout of the underlying HttpClient used by the HTTP task." - }, - { - "name": "conductor.tasks.kafka-publish.requestTimeoutMs", - "type": "java.lang.String", - "description": "The request.timeout.ms value that the kafka producer is configured with in the KAFKA_PUBLISH task." - }, - { - "name": "conductor.tasks.kafka-publish.maxBlockMs", - "type": "java.lang.String", - "description": "The max.block.ms value that the kafka producer is configured with in the KAFKA_PUBLISH task." - }, - { - "name": "conductor.tasks.kafka-publish.cacheSize", - "type": "java.lang.Integer", - "description": "The maximum number of entries permitted in the in-memory cache used by the KAFKA_PUBLISH task." - }, - { - "name": "conductor.tasks.kafka-publish.cacheTimeMs", - "type": "java.lang.Integer", - "description": "The duration after which a cached entry will be removed from the in-memory cache used by the KAFKA_PUBLISH task." - }, - { - "name": "conductor.workflow-status-listener.type", - "type": "java.lang.String", - "description": "The implementation of the workflow status listener to be used." - }, - { - "name": "conductor.workflow-execution-lock.type", - "type": "java.lang.String", - "description": "The implementation of the workflow execution lock to be used.", - "defaultValue": "noop_lock" - }, - { - "name": "conductor.event-queues.sqs.enabled", - "type": "java.lang.Boolean", - "description": "Enable the use of AWS SQS implementation to provide queues for consuming events.", - "sourceType": "com.netflix.conductor.contribs.queue.sqs.config.SQSEventQueueConfiguration" - }, - { - "name": "conductor.event-queues.amqp.enabled", - "type": "java.lang.Boolean", - "description": "Enable the use of RabbitMQ implementation to provide queues for consuming events.", - "sourceType": "com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueConfiguration" - }, - { - "name": "conductor.event-queues.nats.enabled", - "type": "java.lang.Boolean", - "description": "Enable the use of NATS implementation to provide queues for consuming events.", - "sourceType": "com.netflix.conductor.contribs.queue.nats.config.NATSConfiguration" - }, - { - "name": "conductor.event-queues.nats-stream.enabled", - "type": "java.lang.Boolean", - "description": "Enable the use of NATS Streaming implementation to provide queues for consuming events.", - "sourceType": "com.netflix.conductor.contribs.queue.nats.config.NATSStreamConfiguration" - }, - { - "name": "conductor.default-event-queue.type", - "type": "java.lang.String", - "description": "The default event queue type to listen on for the WAIT task." - } - ], - "hints": [ - { - "name": "conductor.workflow-status-listener.type", - "values": [ - { - "value": "stub", - "description": "Use the no-op implementation of the workflow status listener." 
- }, - { - "value": "archive", - "description": "Use the archive implementation which immediately archives the workflow upon termination or completion as the workflow status listener." - }, - { - "value": "queue_publisher", - "description": "Use the publisher implementation which publishes a message to the underlying queue implementation upon termination or completion as the workflow status listener." - } - ] - }, - { - "name": "conductor.default-event-queue.type", - "values": [ - { - "value": "sqs", - "description": "Use AWS SQS as the event queue to listen on for the WAIT task." - }, - { - "value": "amqp", - "description": "Use RabbitMQ as the event queue to listen on for the WAIT task." - }, - { - "value": "nats_stream", - "description": "Use NATS Stream as the event queue to listen on for the WAIT task." - } - ] - }, - { - "name": "conductor.workflow-execution-lock.type", - "values": [ - { - "value": "noop_lock", - "description": "Use the no-op implementation as the lock provider." - }, - { - "value": "local_only", - "description": "Use the local in-memory cache based implementation as the lock provider." - }, - { - "value": "redis", - "description": "Use the redis-lock implementation as the lock provider." - }, - { - "value": "zookeeper", - "description": "Use the zookeeper-lock implementation as the lock provider." - } - ] - } - ] -} diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/listener/ArchivingWorkflowStatusListenerTest.java b/contribs/src/test/java/com/netflix/conductor/contribs/listener/ArchivingWorkflowStatusListenerTest.java deleted file mode 100644 index d209887add..0000000000 --- a/contribs/src/test/java/com/netflix/conductor/contribs/listener/ArchivingWorkflowStatusListenerTest.java +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
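The hints above enumerate the accepted values for the status-listener, lock, and default-event-queue selectors. For example, an application.properties that archives finished workflows and uses the in-memory lock would combine them as:

    conductor.workflow-status-listener.type=archive
    conductor.workflow-execution-lock.type=local_only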

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.listener; - -import java.util.UUID; - -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; - -import com.netflix.conductor.common.metadata.workflow.WorkflowDef; -import com.netflix.conductor.contribs.listener.archive.ArchivingWorkflowStatusListener; -import com.netflix.conductor.core.dal.ExecutionDAOFacade; -import com.netflix.conductor.model.WorkflowModel; - -import static org.mockito.Mockito.*; - -/** - * @author pavel.halabala - */ -public class ArchivingWorkflowStatusListenerTest { - - WorkflowModel workflow; - ExecutionDAOFacade executionDAOFacade; - ArchivingWorkflowStatusListener listener; - - @Before - public void before() { - workflow = new WorkflowModel(); - WorkflowDef def = new WorkflowDef(); - def.setName("name1"); - def.setVersion(1); - workflow.setWorkflowDefinition(def); - workflow.setWorkflowId(UUID.randomUUID().toString()); - - executionDAOFacade = Mockito.mock(ExecutionDAOFacade.class); - listener = new ArchivingWorkflowStatusListener(executionDAOFacade); - } - - @Test - public void testArchiveOnWorkflowCompleted() { - listener.onWorkflowCompleted(workflow); - verify(executionDAOFacade, times(1)).removeWorkflow(workflow.getWorkflowId(), true); - verifyNoMoreInteractions(executionDAOFacade); - } - - @Test - public void testArchiveOnWorkflowTerminated() { - listener.onWorkflowTerminated(workflow); - verify(executionDAOFacade, times(1)).removeWorkflow(workflow.getWorkflowId(), true); - verifyNoMoreInteractions(executionDAOFacade); - } -} diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfigurationTest.java b/contribs/src/test/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfigurationTest.java deleted file mode 100644 index 3fdd736f3e..0000000000 --- a/contribs/src/test/java/com/netflix/conductor/contribs/metrics/LoggingMetricsConfigurationTest.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.metrics; - -import java.util.concurrent.TimeUnit; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.slf4j.Logger; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.context.annotation.Import; -import org.springframework.test.context.TestPropertySource; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.conductor.contribs.metrics.LoggingMetricsConfiguration.Slf4jReporterProvider; - -import com.codahale.metrics.MetricRegistry; - -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.timeout; -import static org.mockito.Mockito.verify; - -@RunWith(SpringRunner.class) -@Import({LoggingMetricsConfiguration.class, MetricsRegistryConfiguration.class}) -@TestPropertySource(properties = {"conductor.metrics-logger.enabled=true"}) -public class LoggingMetricsConfigurationTest { - - @Autowired MetricRegistry metricRegistry; - - @Test - public void testCollector() { - Logger logger = spy(Logger.class); - doReturn(true).when(logger).isInfoEnabled(any()); - Slf4jReporterProvider reporterProvider = - new Slf4jReporterProvider(metricRegistry, logger, 1); - metricRegistry.counter("test").inc(); - - reporterProvider.getReporter(); - verify(logger, timeout(TimeUnit.SECONDS.toMillis(10))).isInfoEnabled(null); - } -} diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/metrics/PrometheusMetricsConfigurationTest.java b/contribs/src/test/java/com/netflix/conductor/contribs/metrics/PrometheusMetricsConfigurationTest.java deleted file mode 100644 index c87b75dd0b..0000000000 --- a/contribs/src/test/java/com/netflix/conductor/contribs/metrics/PrometheusMetricsConfigurationTest.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Copyright 2021 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.metrics; - -import java.lang.reflect.Field; -import java.util.Arrays; -import java.util.List; -import java.util.Optional; - -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.boot.test.context.TestConfiguration; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Import; -import org.springframework.context.annotation.Primary; -import org.springframework.test.context.TestPropertySource; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.spectator.api.Registry; -import com.netflix.spectator.api.Spectator; -import com.netflix.spectator.micrometer.MicrometerRegistry; - -import io.micrometer.core.instrument.MeterRegistry; -import io.micrometer.prometheus.PrometheusConfig; -import io.micrometer.prometheus.PrometheusMeterRegistry; - -import static org.junit.Assert.assertTrue; - -@RunWith(SpringRunner.class) -@Import({PrometheusMetricsConfiguration.class}) -@TestPropertySource(properties = {"conductor.metrics-prometheus.enabled=true"}) -public class PrometheusMetricsConfigurationTest { - - @SuppressWarnings("unchecked") - @Test - public void testCollector() throws IllegalAccessException { - final Optional registries = - Arrays.stream(Spectator.globalRegistry().getClass().getDeclaredFields()) - .filter(f -> f.getName().equals("registries")) - .findFirst(); - assertTrue(registries.isPresent()); - registries.get().setAccessible(true); - - List meters = (List) registries.get().get(Spectator.globalRegistry()); - assertTrue(meters.size() > 0); - Optional microMeterReg = - meters.stream() - .filter(r -> r.getClass().equals(MicrometerRegistry.class)) - .findFirst(); - assertTrue(microMeterReg.isPresent()); - } - - @TestConfiguration - public static class TestConfig { - - /** - * This bean will be injected in PrometheusMetricsConfiguration, which wraps it with a - * MicrometerRegistry, and appends it to the global registry. - * - * @return a Prometheus registry instance - */ - @Bean - @Primary - public MeterRegistry meterRegistry() { - return new PrometheusMeterRegistry(PrometheusConfig.DEFAULT); - } - } -} diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPEventQueueProviderTest.java b/contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPEventQueueProviderTest.java deleted file mode 100644 index 38e9491219..0000000000 --- a/contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPEventQueueProviderTest.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp; - -import java.time.Duration; - -import org.junit.Before; -import org.junit.Test; - -import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties; -import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProvider; -import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants; -import com.netflix.conductor.core.events.queue.ObservableQueue; - -import com.rabbitmq.client.AMQP.PROTOCOL; -import com.rabbitmq.client.ConnectionFactory; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class AMQPEventQueueProviderTest { - - private AMQPEventQueueProperties properties; - - @Before - public void setUp() { - properties = mock(AMQPEventQueueProperties.class); - when(properties.getBatchSize()).thenReturn(1); - when(properties.getPollTimeDuration()).thenReturn(Duration.ofMillis(100)); - when(properties.getHosts()).thenReturn(ConnectionFactory.DEFAULT_HOST); - when(properties.getUsername()).thenReturn(ConnectionFactory.DEFAULT_USER); - when(properties.getPassword()).thenReturn(ConnectionFactory.DEFAULT_PASS); - when(properties.getVirtualHost()).thenReturn(ConnectionFactory.DEFAULT_VHOST); - when(properties.getPort()).thenReturn(PROTOCOL.PORT); - when(properties.getConnectionTimeoutInMilliSecs()).thenReturn(60000); - when(properties.isUseNio()).thenReturn(false); - when(properties.isDurable()).thenReturn(true); - when(properties.isExclusive()).thenReturn(false); - when(properties.isAutoDelete()).thenReturn(false); - when(properties.getContentType()).thenReturn("application/json"); - when(properties.getContentEncoding()).thenReturn("UTF-8"); - when(properties.getExchangeType()).thenReturn("topic"); - when(properties.getDeliveryMode()).thenReturn(2); - when(properties.isUseExchange()).thenReturn(true); - } - - @Test - public void testAMQPEventQueueProvider_defaultconfig_exchange() { - String exchangestring = - "amqp_exchange:myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=2"; - AMQPEventQueueProvider eventqProvider = - new AMQPEventQueueProvider(properties, "amqp_exchange", true); - ObservableQueue queue = eventqProvider.getQueue(exchangestring); - assertNotNull(queue); - assertEquals(exchangestring, queue.getName()); - assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, queue.getType()); - } - - @Test - public void testAMQPEventQueueProvider_defaultconfig_queue() { - String exchangestring = - "amqp_queue:myQueueName?deliveryMode=2&durable=false&autoDelete=true&exclusive=true"; - AMQPEventQueueProvider eventqProvider = - new AMQPEventQueueProvider(properties, "amqp_queue", false); - ObservableQueue queue = eventqProvider.getQueue(exchangestring); - assertNotNull(queue); - assertEquals(exchangestring, queue.getName()); - assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, queue.getType()); - } -} diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueueTest.java b/contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueueTest.java deleted file mode 100644 index 86b3ac5965..0000000000 --- 
a/contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPObservableQueueTest.java
+++ /dev/null
@@ -1,895 +0,0 @@
-/*
- * Copyright 2020 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.queue.amqp; - -import java.io.IOException; -import java.time.Duration; -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedList; -import java.util.List; -import java.util.Random; -import java.util.UUID; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.commons.lang3.StringUtils; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mockito; -import org.mockito.internal.stubbing.answers.DoesNothing; -import org.mockito.stubbing.OngoingStubbing; - -import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties; -import com.netflix.conductor.contribs.queue.amqp.config.AMQPRetryPattern; -import com.netflix.conductor.contribs.queue.amqp.util.AMQPConstants; -import com.netflix.conductor.contribs.queue.amqp.util.AMQPSettings; -import com.netflix.conductor.contribs.queue.amqp.util.RetryType; -import com.netflix.conductor.core.events.queue.Message; - -import com.rabbitmq.client.AMQP; -import com.rabbitmq.client.AMQP.PROTOCOL; -import com.rabbitmq.client.AMQP.Queue.DeclareOk; -import com.rabbitmq.client.Address; -import com.rabbitmq.client.Channel; -import com.rabbitmq.client.Connection; -import com.rabbitmq.client.ConnectionFactory; -import com.rabbitmq.client.Consumer; -import com.rabbitmq.client.Envelope; -import com.rabbitmq.client.GetResponse; -import com.rabbitmq.client.impl.AMQImpl; -import rx.Observable; -import rx.observers.Subscribers; -import rx.observers.TestSubscriber; - -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; -import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.ArgumentMatchers.anyMap; -import static org.mockito.ArgumentMatchers.eq; -import static org.mockito.Mockito.atLeast; -import static org.mockito.Mockito.atLeastOnce; -import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doThrow; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; -import static org.mockito.Mockito.when; - -@SuppressWarnings({"rawtypes", "unchecked"}) -public class AMQPObservableQueueTest { - - final int batchSize = 10; - final int pollTimeMs = 500; - - Address[] addresses; - AMQPEventQueueProperties properties; - - @Before - public void setUp() { - properties = mock(AMQPEventQueueProperties.class); - when(properties.getBatchSize()).thenReturn(1); - when(properties.getPollTimeDuration()).thenReturn(Duration.ofMillis(100)); - when(properties.getHosts()).thenReturn(ConnectionFactory.DEFAULT_HOST); - when(properties.getUsername()).thenReturn(ConnectionFactory.DEFAULT_USER); - when(properties.getPassword()).thenReturn(ConnectionFactory.DEFAULT_PASS); - 
when(properties.getVirtualHost()).thenReturn(ConnectionFactory.DEFAULT_VHOST); - when(properties.getPort()).thenReturn(PROTOCOL.PORT); - when(properties.getConnectionTimeoutInMilliSecs()).thenReturn(60000); - when(properties.isUseNio()).thenReturn(false); - when(properties.isDurable()).thenReturn(true); - when(properties.isExclusive()).thenReturn(false); - when(properties.isAutoDelete()).thenReturn(false); - when(properties.getContentType()).thenReturn("application/json"); - when(properties.getContentEncoding()).thenReturn("UTF-8"); - when(properties.getExchangeType()).thenReturn("topic"); - when(properties.getDeliveryMode()).thenReturn(2); - when(properties.isUseExchange()).thenReturn(true); - addresses = new Address[] {new Address("localhost", PROTOCOL.PORT)}; - AMQPConnection.setAMQPConnection(null); - } - - List buildQueue(final Random random, final int bound) { - final LinkedList queue = new LinkedList(); - for (int i = 0; i < bound; i++) { - AMQP.BasicProperties props = mock(AMQP.BasicProperties.class); - when(props.getMessageId()).thenReturn(UUID.randomUUID().toString()); - Envelope envelope = mock(Envelope.class); - when(envelope.getDeliveryTag()).thenReturn(random.nextLong()); - GetResponse response = mock(GetResponse.class); - when(response.getProps()).thenReturn(props); - when(response.getEnvelope()).thenReturn(envelope); - when(response.getBody()).thenReturn("{}".getBytes()); - when(response.getMessageCount()).thenReturn(bound - i); - queue.add(response); - } - return queue; - } - - Channel mockBaseChannel() throws IOException, TimeoutException { - Channel channel = mock(Channel.class); - when(channel.isOpen()).thenReturn(Boolean.TRUE); - /* - * doAnswer(invocation -> { when(channel.isOpen()).thenReturn(Boolean.FALSE); - * return DoesNothing.doesNothing(); }).when(channel).close(); - */ - return channel; - } - - Channel mockChannelForQueue( - Channel channel, - boolean isWorking, - boolean exists, - String name, - List queue) - throws IOException { - // queueDeclarePassive - final AMQImpl.Queue.DeclareOk queueDeclareOK = - new AMQImpl.Queue.DeclareOk(name, queue.size(), 1); - if (exists) { - when(channel.queueDeclarePassive(eq(name))).thenReturn(queueDeclareOK); - } else { - when(channel.queueDeclarePassive(eq(name))) - .thenThrow(new IOException("Queue " + name + " exists")); - } - // queueDeclare - OngoingStubbing declareOkOngoingStubbing = - when(channel.queueDeclare( - eq(name), anyBoolean(), anyBoolean(), anyBoolean(), anyMap())) - .thenReturn(queueDeclareOK); - if (!isWorking) { - declareOkOngoingStubbing.thenThrow( - new IOException("Cannot declare queue " + name), - new RuntimeException("Not working")); - } - // messageCount - when(channel.messageCount(eq(name))).thenReturn((long) queue.size()); - // basicGet - OngoingStubbing getResponseOngoingStubbing = - Mockito.when(channel.basicConsume(eq(name), anyBoolean(), any(Consumer.class))) - .thenReturn(name); - if (!isWorking) { - getResponseOngoingStubbing.thenThrow( - new IOException("Not working"), new RuntimeException("Not working")); - } - // basicPublish - if (isWorking) { - doNothing() - .when(channel) - .basicPublish( - eq(StringUtils.EMPTY), - eq(name), - any(AMQP.BasicProperties.class), - any(byte[].class)); - } else { - doThrow(new IOException("Not working")) - .when(channel) - .basicPublish( - eq(StringUtils.EMPTY), - eq(name), - any(AMQP.BasicProperties.class), - any(byte[].class)); - } - return channel; - } - - Channel mockChannelForExchange( - Channel channel, - boolean isWorking, - boolean exists, - String 
queueName, - String name, - String type, - String routingKey, - List queue) - throws IOException { - // exchangeDeclarePassive - final AMQImpl.Exchange.DeclareOk exchangeDeclareOK = new AMQImpl.Exchange.DeclareOk(); - if (exists) { - when(channel.exchangeDeclarePassive(eq(name))).thenReturn(exchangeDeclareOK); - } else { - when(channel.exchangeDeclarePassive(eq(name))) - .thenThrow(new IOException("Exchange " + name + " exists")); - } - // exchangeDeclare - OngoingStubbing declareOkOngoingStubbing = - when(channel.exchangeDeclare( - eq(name), eq(type), anyBoolean(), anyBoolean(), anyMap())) - .thenReturn(exchangeDeclareOK); - if (!isWorking) { - declareOkOngoingStubbing.thenThrow( - new IOException("Cannot declare exchange " + name + " of type " + type), - new RuntimeException("Not working")); - } - // queueDeclarePassive - final AMQImpl.Queue.DeclareOk queueDeclareOK = - new AMQImpl.Queue.DeclareOk(queueName, queue.size(), 1); - if (exists) { - when(channel.queueDeclarePassive(eq(queueName))).thenReturn(queueDeclareOK); - } else { - when(channel.queueDeclarePassive(eq(queueName))) - .thenThrow(new IOException("Queue " + queueName + " exists")); - } - // queueDeclare - when(channel.queueDeclare( - eq(queueName), anyBoolean(), anyBoolean(), anyBoolean(), anyMap())) - .thenReturn(queueDeclareOK); - // queueBind - when(channel.queueBind(eq(queueName), eq(name), eq(routingKey))) - .thenReturn(new AMQImpl.Queue.BindOk()); - // messageCount - when(channel.messageCount(eq(name))).thenReturn((long) queue.size()); - // basicGet - - OngoingStubbing getResponseOngoingStubbing = - Mockito.when(channel.basicConsume(eq(queueName), anyBoolean(), any(Consumer.class))) - .thenReturn(queueName); - - if (!isWorking) { - getResponseOngoingStubbing.thenThrow( - new IOException("Not working"), new RuntimeException("Not working")); - } - // basicPublish - if (isWorking) { - doNothing() - .when(channel) - .basicPublish( - eq(name), - eq(routingKey), - any(AMQP.BasicProperties.class), - any(byte[].class)); - } else { - doThrow(new IOException("Not working")) - .when(channel) - .basicPublish( - eq(name), - eq(routingKey), - any(AMQP.BasicProperties.class), - any(byte[].class)); - } - return channel; - } - - Connection mockGoodConnection(Channel channel) throws IOException { - Connection connection = mock(Connection.class); - when(connection.createChannel()).thenReturn(channel); - when(connection.isOpen()).thenReturn(Boolean.TRUE); - /* - * doAnswer(invocation -> { when(connection.isOpen()).thenReturn(Boolean.FALSE); - * return DoesNothing.doesNothing(); }).when(connection).close(); - */ return connection; - } - - Connection mockBadConnection() throws IOException { - Connection connection = mock(Connection.class); - when(connection.createChannel()).thenThrow(new IOException("Can't create channel")); - when(connection.isOpen()).thenReturn(Boolean.TRUE); - doThrow(new IOException("Can't close connection")).when(connection).close(); - return connection; - } - - ConnectionFactory mockConnectionFactory(Connection connection) - throws IOException, TimeoutException { - ConnectionFactory connectionFactory = mock(ConnectionFactory.class); - when(connectionFactory.newConnection(eq(addresses), Mockito.anyString())) - .thenReturn(connection); - return connectionFactory; - } - - void runObserve( - Channel channel, - AMQPObservableQueue observableQueue, - String queueName, - boolean useWorkingChannel, - int batchSize) - throws IOException { - - final List found = new ArrayList<>(batchSize); - TestSubscriber subscriber = 
TestSubscriber.create(Subscribers.create(found::add)); - rx.Observable observable = - observableQueue.observe().take(pollTimeMs * 2, TimeUnit.MILLISECONDS); - assertNotNull(observable); - observable.subscribe(subscriber); - subscriber.awaitTerminalEvent(); - subscriber.assertNoErrors(); - subscriber.assertCompleted(); - if (useWorkingChannel) { - verify(channel, atLeast(1)) - .basicConsume(eq(queueName), anyBoolean(), any(Consumer.class)); - doNothing().when(channel).basicAck(anyLong(), eq(false)); - doAnswer(DoesNothing.doesNothing()).when(channel).basicAck(anyLong(), eq(false)); - observableQueue.ack(Collections.synchronizedList(found)); - } else { - assertNotNull(found); - assertTrue(found.isEmpty()); - } - observableQueue.close(); - } - - @Test - public void - testGetMessagesFromExistingExchangeWithDurableExclusiveAutoDeleteQueueConfiguration() - throws IOException, TimeoutException { - // Mock channel and connection - Channel channel = mockBaseChannel(); - Connection connection = mockGoodConnection(channel); - testGetMessagesFromExchangeAndCustomConfigurationFromURI( - channel, connection, true, true, true, true, true); - } - - @Test - public void testPublishMessagesToNotExistingExchangeAndDefaultConfiguration() - throws IOException, TimeoutException { - // Mock channel and connection - Channel channel = mockBaseChannel(); - Connection connection = mockGoodConnection(channel); - testPublishMessagesToExchangeAndDefaultConfiguration(channel, connection, false, true); - } - - @Test - public void testAck() throws IOException, TimeoutException { - // Mock channel and connection - Channel channel = mockBaseChannel(); - Connection connection = mockGoodConnection(channel); - final Random random = new Random(); - - final String name = RandomStringUtils.randomAlphabetic(30), - type = "topic", - routingKey = RandomStringUtils.randomAlphabetic(30); - AMQPRetryPattern retrySettings = null; - final AMQPSettings settings = - new AMQPSettings(properties) - .fromURI( - "amqp_exchange:" - + name - + "?exchangeType=" - + type - + "&routingKey=" - + routingKey); - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - mockConnectionFactory(connection), - addresses, - true, - settings, - retrySettings, - batchSize, - pollTimeMs); - List messages = new LinkedList<>(); - Message msg = new Message(); - msg.setId("0e3eef8f-ebb1-4244-9665-759ab5bdf433"); - msg.setPayload("Payload"); - msg.setReceipt("1"); - messages.add(msg); - List deliveredTags = observableQueue.ack(messages); - assertNotNull(deliveredTags); - } - - private void testGetMessagesFromExchangeAndDefaultConfiguration( - Channel channel, Connection connection, boolean exists, boolean useWorkingChannel) - throws IOException, TimeoutException { - - final Random random = new Random(); - - final String name = RandomStringUtils.randomAlphabetic(30), - type = "topic", - routingKey = RandomStringUtils.randomAlphabetic(30); - final String queueName = String.format("bound_to_%s", name); - - final AMQPSettings settings = - new AMQPSettings(properties) - .fromURI( - "amqp_exchange:" - + name - + "?exchangeType=" - + type - + "&routingKey=" - + routingKey); - assertTrue(settings.isDurable()); - assertFalse(settings.isExclusive()); - assertFalse(settings.autoDelete()); - assertEquals(2, settings.getDeliveryMode()); - assertEquals(name, settings.getQueueOrExchangeName()); - assertEquals(type, settings.getExchangeType()); - assertEquals(routingKey, settings.getRoutingKey()); - - List queue = buildQueue(random, batchSize); - channel = - 
mockChannelForExchange( - channel, - useWorkingChannel, - exists, - queueName, - name, - type, - routingKey, - queue); - AMQPRetryPattern retrySettings = null; - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - mockConnectionFactory(connection), - addresses, - true, - settings, - retrySettings, - batchSize, - pollTimeMs); - - assertArrayEquals(addresses, observableQueue.getAddresses()); - assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType()); - assertEquals( - AMQPConstants.AMQP_EXCHANGE_TYPE - + ":" - + name - + "?exchangeType=" - + type - + "&routingKey=" - + routingKey, - observableQueue.getName()); - assertEquals(name, observableQueue.getURI()); - assertEquals(batchSize, observableQueue.getBatchSize()); - assertEquals(pollTimeMs, observableQueue.getPollTimeInMS()); - assertEquals(queue.size(), observableQueue.size()); - - runObserve(channel, observableQueue, queueName, useWorkingChannel, batchSize); - - if (useWorkingChannel) { - verify(channel, atLeastOnce()) - .exchangeDeclare( - eq(name), - eq(type), - eq(settings.isDurable()), - eq(settings.autoDelete()), - eq(Collections.emptyMap())); - verify(channel, atLeastOnce()) - .queueDeclare( - eq(queueName), - eq(settings.isDurable()), - eq(settings.isExclusive()), - eq(settings.autoDelete()), - anyMap()); - - verify(channel, atLeastOnce()).queueBind(eq(queueName), eq(name), eq(routingKey)); - } - } - - private void testGetMessagesFromExchangeAndCustomConfigurationFromURI( - Channel channel, - Connection connection, - boolean exists, - boolean useWorkingChannel, - boolean durable, - boolean exclusive, - boolean autoDelete) - throws IOException, TimeoutException { - - final Random random = new Random(); - - final String name = RandomStringUtils.randomAlphabetic(30), - type = "topic", - routingKey = RandomStringUtils.randomAlphabetic(30); - final String queueName = String.format("bound_to_%s", name); - - final AMQPSettings settings = - new AMQPSettings(properties) - .fromURI( - "amqp_exchange:" - + name - + "?exchangeType=" - + type - + "&routingKey=" - + routingKey - + "&deliveryMode=2" - + "&durable=" - + durable - + "&exclusive=" - + exclusive - + "&autoDelete=" - + autoDelete); - assertEquals(durable, settings.isDurable()); - assertEquals(exclusive, settings.isExclusive()); - assertEquals(autoDelete, settings.autoDelete()); - assertEquals(2, settings.getDeliveryMode()); - assertEquals(name, settings.getQueueOrExchangeName()); - assertEquals(type, settings.getExchangeType()); - assertEquals(routingKey, settings.getRoutingKey()); - - List queue = buildQueue(random, batchSize); - channel = - mockChannelForExchange( - channel, - useWorkingChannel, - exists, - queueName, - name, - type, - routingKey, - queue); - AMQPRetryPattern retrySettings = null; - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - mockConnectionFactory(connection), - addresses, - true, - settings, - retrySettings, - batchSize, - pollTimeMs); - - assertArrayEquals(addresses, observableQueue.getAddresses()); - assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType()); - assertEquals( - AMQPConstants.AMQP_EXCHANGE_TYPE - + ":" - + name - + "?exchangeType=" - + type - + "&routingKey=" - + routingKey - + "&deliveryMode=2" - + "&durable=" - + durable - + "&exclusive=" - + exclusive - + "&autoDelete=" - + autoDelete, - observableQueue.getName()); - assertEquals(name, observableQueue.getURI()); - assertEquals(batchSize, observableQueue.getBatchSize()); - assertEquals(pollTimeMs, 
observableQueue.getPollTimeInMS()); - assertEquals(queue.size(), observableQueue.size()); - - runObserve(channel, observableQueue, queueName, useWorkingChannel, batchSize); - - if (useWorkingChannel) { - verify(channel, atLeastOnce()) - .exchangeDeclare( - eq(name), - eq(type), - eq(settings.isDurable()), - eq(settings.autoDelete()), - eq(Collections.emptyMap())); - verify(channel, atLeastOnce()) - .queueDeclare( - eq(queueName), - eq(settings.isDurable()), - eq(settings.isExclusive()), - eq(settings.autoDelete()), - anyMap()); - - verify(channel, atLeastOnce()).queueBind(eq(queueName), eq(name), eq(routingKey)); - } - } - - private void testPublishMessagesToExchangeAndDefaultConfiguration( - Channel channel, Connection connection, boolean exists, boolean useWorkingChannel) - throws IOException, TimeoutException { - final Random random = new Random(); - - final String name = RandomStringUtils.randomAlphabetic(30), - type = "topic", - queueName = RandomStringUtils.randomAlphabetic(30), - routingKey = RandomStringUtils.randomAlphabetic(30); - - final AMQPSettings settings = - new AMQPSettings(properties) - .fromURI( - "amqp_exchange:" - + name - + "?exchangeType=" - + type - + "&routingKey=" - + routingKey - + "&deliveryMode=2&durable=true&exclusive=false&autoDelete=true"); - assertTrue(settings.isDurable()); - assertFalse(settings.isExclusive()); - assertTrue(settings.autoDelete()); - assertEquals(2, settings.getDeliveryMode()); - assertEquals(name, settings.getQueueOrExchangeName()); - assertEquals(type, settings.getExchangeType()); - assertEquals(routingKey, settings.getRoutingKey()); - - List queue = buildQueue(random, batchSize); - channel = - mockChannelForExchange( - channel, - useWorkingChannel, - exists, - queueName, - name, - type, - routingKey, - queue); - AMQPRetryPattern retrySettings = null; - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - mockConnectionFactory(connection), - addresses, - true, - settings, - retrySettings, - batchSize, - pollTimeMs); - - assertArrayEquals(addresses, observableQueue.getAddresses()); - assertEquals(AMQPConstants.AMQP_EXCHANGE_TYPE, observableQueue.getType()); - assertEquals( - AMQPConstants.AMQP_EXCHANGE_TYPE - + ":" - + name - + "?exchangeType=" - + type - + "&routingKey=" - + routingKey - + "&deliveryMode=2&durable=true&exclusive=false&autoDelete=true", - observableQueue.getName()); - assertEquals(name, observableQueue.getURI()); - assertEquals(batchSize, observableQueue.getBatchSize()); - assertEquals(pollTimeMs, observableQueue.getPollTimeInMS()); - assertEquals(queue.size(), observableQueue.size()); - - List messages = new LinkedList<>(); - Observable.range(0, batchSize) - .forEach((Integer x) -> messages.add(new Message("" + x, "payload: " + x, null))); - assertEquals(batchSize, messages.size()); - observableQueue.publish(messages); - - if (useWorkingChannel) { - verify(channel, times(batchSize)) - .basicPublish( - eq(name), - eq(routingKey), - any(AMQP.BasicProperties.class), - any(byte[].class)); - } - } - - @Test - public void testGetMessagesFromExistingQueueAndDefaultConfiguration() - throws IOException, TimeoutException { - // Mock channel and connection - Channel channel = mockBaseChannel(); - Connection connection = mockGoodConnection(channel); - testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, true, true); - } - - @Test - public void testGetMessagesFromNotExistingQueueAndDefaultConfiguration() - throws IOException, TimeoutException { - // Mock channel and connection - Channel channel = 
mockBaseChannel(); - Connection connection = mockGoodConnection(channel); - testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, false, true); - } - - @Test - public void testGetMessagesFromQueueWithBadChannel() throws IOException, TimeoutException { - // Mock channel and connection - Channel channel = mockBaseChannel(); - Connection connection = mockGoodConnection(channel); - testGetMessagesFromQueueAndDefaultConfiguration(channel, connection, true, false); - } - - @Test(expected = RuntimeException.class) - public void testPublishMessagesToQueueWithBadChannel() throws IOException, TimeoutException { - // Mock channel and connection - Channel channel = mockBaseChannel(); - Connection connection = mockGoodConnection(channel); - testPublishMessagesToQueueAndDefaultConfiguration(channel, connection, true, false); - } - - @Test(expected = IllegalArgumentException.class) - public void testAMQPObservalbleQueue_empty() throws IOException, TimeoutException { - AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test"); - AMQPRetryPattern retrySettings = null; - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - null, addresses, false, settings, retrySettings, batchSize, pollTimeMs); - } - - @Test(expected = IllegalArgumentException.class) - public void testAMQPObservalbleQueue_addressEmpty() throws IOException, TimeoutException { - AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test"); - AMQPRetryPattern retrySettings = null; - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - mockConnectionFactory(mockGoodConnection(mockBaseChannel())), - null, - false, - settings, - retrySettings, - batchSize, - pollTimeMs); - } - - @Test(expected = IllegalArgumentException.class) - public void testAMQPObservalbleQueue_settingsEmpty() throws IOException, TimeoutException { - AMQPRetryPattern retrySettings = null; - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - mockConnectionFactory(mockGoodConnection(mockBaseChannel())), - addresses, - false, - null, - retrySettings, - batchSize, - pollTimeMs); - } - - @Test(expected = IllegalArgumentException.class) - public void testAMQPObservalbleQueue_batchsizezero() throws IOException, TimeoutException { - AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test"); - AMQPRetryPattern retrySettings = null; - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - mockConnectionFactory(mockGoodConnection(mockBaseChannel())), - addresses, - false, - settings, - retrySettings, - 0, - pollTimeMs); - } - - @Test(expected = IllegalArgumentException.class) - public void testAMQPObservalbleQueue_polltimezero() throws IOException, TimeoutException { - AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:test"); - AMQPRetryPattern retrySettings = null; - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - mockConnectionFactory(mockGoodConnection(mockBaseChannel())), - addresses, - false, - settings, - retrySettings, - batchSize, - 0); - } - - @Test - public void testclosetExistingQueueAndDefaultConfiguration() - throws IOException, TimeoutException { - // Mock channel and connection - Channel channel = mockBaseChannel(); - Connection connection = mockGoodConnection(channel); - testGetMessagesFromQueueAndDefaultConfiguration_close(channel, connection, false, true); - } - - private void testGetMessagesFromQueueAndDefaultConfiguration( - Channel channel, Connection connection, boolean queueExists, boolean 
useWorkingChannel) - throws IOException, TimeoutException { - final Random random = new Random(); - - final String queueName = RandomStringUtils.randomAlphabetic(30); - AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:" + queueName); - - List queue = buildQueue(random, batchSize); - channel = mockChannelForQueue(channel, useWorkingChannel, queueExists, queueName, queue); - AMQPRetryPattern retrySettings = null; - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - mockConnectionFactory(connection), - addresses, - false, - settings, - retrySettings, - batchSize, - pollTimeMs); - - assertArrayEquals(addresses, observableQueue.getAddresses()); - assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, observableQueue.getType()); - assertEquals(AMQPConstants.AMQP_QUEUE_TYPE + ":" + queueName, observableQueue.getName()); - assertEquals(queueName, observableQueue.getURI()); - assertEquals(batchSize, observableQueue.getBatchSize()); - assertEquals(pollTimeMs, observableQueue.getPollTimeInMS()); - assertEquals(queue.size(), observableQueue.size()); - - runObserve(channel, observableQueue, queueName, useWorkingChannel, batchSize); - } - - private void testGetMessagesFromQueueAndDefaultConfiguration_close( - Channel channel, Connection connection, boolean queueExists, boolean useWorkingChannel) - throws IOException, TimeoutException { - final Random random = new Random(); - - final String queueName = RandomStringUtils.randomAlphabetic(30); - AMQPSettings settings = new AMQPSettings(properties).fromURI("amqp_queue:" + queueName); - - List queue = buildQueue(random, batchSize); - channel = mockChannelForQueue(channel, useWorkingChannel, queueExists, queueName, queue); - AMQPRetryPattern retrySettings = null; - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - mockConnectionFactory(connection), - addresses, - false, - settings, - retrySettings, - batchSize, - pollTimeMs); - observableQueue.close(); - assertArrayEquals(addresses, observableQueue.getAddresses()); - assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, observableQueue.getType()); - assertEquals(AMQPConstants.AMQP_QUEUE_TYPE + ":" + queueName, observableQueue.getName()); - assertEquals(queueName, observableQueue.getURI()); - assertEquals(batchSize, observableQueue.getBatchSize()); - assertEquals(pollTimeMs, observableQueue.getPollTimeInMS()); - assertEquals(queue.size(), observableQueue.size()); - } - - private void testPublishMessagesToQueueAndDefaultConfiguration( - Channel channel, Connection connection, boolean queueExists, boolean useWorkingChannel) - throws IOException, TimeoutException { - final Random random = new Random(); - - final String queueName = RandomStringUtils.randomAlphabetic(30); - final AMQPSettings settings = - new AMQPSettings(properties) - .fromURI( - "amqp_queue:" - + queueName - + "?deliveryMode=2&durable=true&exclusive=false&autoDelete=true"); - assertTrue(settings.isDurable()); - assertFalse(settings.isExclusive()); - assertTrue(settings.autoDelete()); - assertEquals(2, settings.getDeliveryMode()); - - List queue = buildQueue(random, batchSize); - channel = mockChannelForQueue(channel, useWorkingChannel, queueExists, queueName, queue); - AMQPRetryPattern retrySettings = new AMQPRetryPattern(3, 5, RetryType.REGULARINTERVALS); - AMQPObservableQueue observableQueue = - new AMQPObservableQueue( - mockConnectionFactory(connection), - addresses, - false, - settings, - retrySettings, - batchSize, - pollTimeMs); - - assertArrayEquals(addresses, observableQueue.getAddresses()); - 
assertEquals(AMQPConstants.AMQP_QUEUE_TYPE, observableQueue.getType()); - assertEquals( - AMQPConstants.AMQP_QUEUE_TYPE - + ":" - + queueName - + "?deliveryMode=2&durable=true&exclusive=false&autoDelete=true", - observableQueue.getName()); - assertEquals(queueName, observableQueue.getURI()); - assertEquals(batchSize, observableQueue.getBatchSize()); - assertEquals(pollTimeMs, observableQueue.getPollTimeInMS()); - assertEquals(queue.size(), observableQueue.size()); - - List messages = new LinkedList<>(); - Observable.range(0, batchSize) - .forEach((Integer x) -> messages.add(new Message("" + x, "payload: " + x, null))); - assertEquals(batchSize, messages.size()); - observableQueue.publish(messages); - - if (useWorkingChannel) { - verify(channel, times(batchSize)) - .basicPublish( - eq(StringUtils.EMPTY), - eq(queueName), - any(AMQP.BasicProperties.class), - any(byte[].class)); - } - } -} diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPSettingsTest.java b/contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPSettingsTest.java deleted file mode 100644 index 91afc6e50b..0000000000 --- a/contribs/src/test/java/com/netflix/conductor/contribs/queue/amqp/AMQPSettingsTest.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- */
-package com.netflix.conductor.contribs.queue.amqp;
-
-import java.time.Duration;
-
-import org.junit.Before;
-import org.junit.Test;
-
-import com.netflix.conductor.contribs.queue.amqp.config.AMQPEventQueueProperties;
-import com.netflix.conductor.contribs.queue.amqp.util.AMQPSettings;
-
-import com.rabbitmq.client.AMQP.PROTOCOL;
-import com.rabbitmq.client.ConnectionFactory;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class AMQPSettingsTest {
-
-    private AMQPEventQueueProperties properties;
-
-    @Before
-    public void setUp() {
-        properties = mock(AMQPEventQueueProperties.class);
-        when(properties.getBatchSize()).thenReturn(1);
-        when(properties.getPollTimeDuration()).thenReturn(Duration.ofMillis(100));
-        when(properties.getHosts()).thenReturn(ConnectionFactory.DEFAULT_HOST);
-        when(properties.getUsername()).thenReturn(ConnectionFactory.DEFAULT_USER);
-        when(properties.getPassword()).thenReturn(ConnectionFactory.DEFAULT_PASS);
-        when(properties.getVirtualHost()).thenReturn(ConnectionFactory.DEFAULT_VHOST);
-        when(properties.getPort()).thenReturn(PROTOCOL.PORT);
-        when(properties.getConnectionTimeoutInMilliSecs()).thenReturn(60000);
-        when(properties.isUseNio()).thenReturn(false);
-        when(properties.isDurable()).thenReturn(true);
-        when(properties.isExclusive()).thenReturn(false);
-        when(properties.isAutoDelete()).thenReturn(false);
-        when(properties.getContentType()).thenReturn("application/json");
-        when(properties.getContentEncoding()).thenReturn("UTF-8");
-        when(properties.getExchangeType()).thenReturn("topic");
-        when(properties.getDeliveryMode()).thenReturn(2);
-        when(properties.isUseExchange()).thenReturn(true);
-    }
-
-    @Test
-    public void testAMQPSettings_exchange_fromuri_defaultconfig() {
-        String exchangestring =
-                "amqp_exchange:myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=2";
-        AMQPSettings settings = new AMQPSettings(properties);
-        settings.fromURI(exchangestring);
-        assertEquals("topic", settings.getExchangeType());
-        assertEquals("test", settings.getRoutingKey());
-        assertEquals("myExchangeName", settings.getQueueOrExchangeName());
-    }
-
-    @Test
-    public void testAMQPSettings_queue_fromuri_defaultconfig() {
-        String exchangestring =
-                "amqp_queue:myQueueName?deliveryMode=2&durable=false&autoDelete=true&exclusive=true";
-        AMQPSettings settings = new AMQPSettings(properties);
-        settings.fromURI(exchangestring);
-        assertFalse(settings.isDurable());
-        assertTrue(settings.isExclusive());
-        assertTrue(settings.autoDelete());
-        assertEquals(2, settings.getDeliveryMode());
-        assertEquals("myQueueName", settings.getQueueOrExchangeName());
-    }
-
-    @Test(expected = IllegalArgumentException.class)
-    public void testAMQPSettings_exchange_fromuri_wrongdeliverymode() {
-        String exchangestring =
-                "amqp_exchange:myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=3";
-        AMQPSettings settings = new AMQPSettings(properties);
-        settings.fromURI(exchangestring);
-    }
-}
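The URI strings parsed throughout these AMQP tests follow one convention: a type prefix (amqp_exchange or amqp_queue), a colon, the exchange or queue name, then an optional query string of AMQP flags. As a rough illustration of how such a string decomposes, here is a plain-JDK sketch (deliberately not the Conductor AMQPSettings parser, just the shape of the format):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class AmqpUriSketch {
        public static void main(String[] args) {
            String uri = "amqp_exchange:myExchangeName?exchangeType=topic&routingKey=test&deliveryMode=2";

            // Type prefix selects queue vs. exchange semantics.
            String prefix = uri.substring(0, uri.indexOf(':'));
            String rest = uri.substring(uri.indexOf(':') + 1);

            // Name is everything up to the optional query string.
            int q = rest.indexOf('?');
            String name = q < 0 ? rest : rest.substring(0, q);

            // Remaining key=value pairs carry the AMQP flags.
            Map<String, String> params = new LinkedHashMap<>();
            if (q >= 0) {
                for (String pair : rest.substring(q + 1).split("&")) {
                    String[] kv = pair.split("=", 2);
                    params.put(kv[0], kv.length > 1 ? kv[1] : "");
                }
            }

            System.out.println(prefix + " -> " + name + " " + params);
            // prints: amqp_exchange -> myExchangeName {exchangeType=topic, routingKey=test, deliveryMode=2}
        }
    }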
diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManagerTest.java b/contribs/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManagerTest.java
deleted file mode 100644
index 338a8b862b..0000000000
--- a/contribs/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaProducerManagerTest.java
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
- * Copyright 2020 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.tasks.kafka; - -import java.time.Duration; -import java.util.Properties; - -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.serialization.LongSerializer; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; - -public class KafkaProducerManagerTest { - - @Test - public void testRequestTimeoutSetFromDefault() { - KafkaProducerManager manager = - new KafkaProducerManager( - Duration.ofMillis(100), - Duration.ofMillis(500), - 10, - Duration.ofMillis(120000)); - KafkaPublishTask.Input input = getInput(); - Properties props = manager.getProducerProperties(input); - assertEquals(props.getProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG), "100"); - } - - @Test - public void testRequestTimeoutSetFromInput() { - KafkaProducerManager manager = - new KafkaProducerManager( - Duration.ofMillis(100), - Duration.ofMillis(500), - 10, - Duration.ofMillis(120000)); - KafkaPublishTask.Input input = getInput(); - input.setRequestTimeoutMs(200); - Properties props = manager.getProducerProperties(input); - assertEquals(props.getProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG), "200"); - } - - @Test - public void testRequestTimeoutSetFromConfig() { - KafkaProducerManager manager = - new KafkaProducerManager( - Duration.ofMillis(150), - Duration.ofMillis(500), - 10, - Duration.ofMillis(120000)); - KafkaPublishTask.Input input = getInput(); - Properties props = manager.getProducerProperties(input); - assertEquals(props.getProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG), "150"); - } - - @SuppressWarnings("rawtypes") - @Test(expected = RuntimeException.class) - public void testExecutionException() { - KafkaProducerManager manager = - new KafkaProducerManager( - Duration.ofMillis(150), - Duration.ofMillis(500), - 10, - Duration.ofMillis(120000)); - KafkaPublishTask.Input input = getInput(); - Producer producer = manager.getProducer(input); - assertNotNull(producer); - } - - @SuppressWarnings("rawtypes") - @Test - public void testCacheInvalidation() { - KafkaProducerManager manager = - new KafkaProducerManager( - Duration.ofMillis(150), Duration.ofMillis(500), 0, Duration.ofMillis(0)); - KafkaPublishTask.Input input = getInput(); - input.setBootStrapServers(""); - Properties props = manager.getProducerProperties(input); - Producer producerMock = mock(Producer.class); - Producer producer = manager.getFromCache(props, () -> producerMock); - assertNotNull(producer); - verify(producerMock, times(1)).close(); - } - - @Test - public void testMaxBlockMsFromConfig() { - KafkaProducerManager manager = - new KafkaProducerManager( - Duration.ofMillis(150), - Duration.ofMillis(500), - 10, - Duration.ofMillis(120000)); - KafkaPublishTask.Input input = getInput(); - Properties props = manager.getProducerProperties(input); - assertEquals(props.getProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG), "500"); - } - - @Test - public void testMaxBlockMsFromInput() { - KafkaProducerManager manager = - new 
KafkaProducerManager(
-                        Duration.ofMillis(150),
-                        Duration.ofMillis(500),
-                        10,
-                        Duration.ofMillis(120000));
-        KafkaPublishTask.Input input = getInput();
-        input.setMaxBlockMs(600);
-        Properties props = manager.getProducerProperties(input);
-        assertEquals(props.getProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG), "600");
-    }
-
-    private KafkaPublishTask.Input getInput() {
-        KafkaPublishTask.Input input = new KafkaPublishTask.Input();
-        input.setTopic("testTopic");
-        input.setValue("TestMessage");
-        input.setKeySerializer(LongSerializer.class.getCanonicalName());
-        input.setBootStrapServers("servers");
-        return input;
-    }
-}
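The request-timeout and max-block tests above pin down a precedence rule: a value carried on the task input overrides the manager's configured default. A compact sketch of that rule against the plain Kafka client's ProducerConfig keys (producerProperties here is a hypothetical helper, not the Conductor implementation):

    import java.util.Properties;

    import org.apache.kafka.clients.producer.ProducerConfig;

    public class ProducerPropsSketch {

        // Hypothetical helper mirroring the precedence the tests assert:
        // an input-supplied timeout wins over the configured default.
        static Properties producerProperties(long defaultRequestTimeoutMs, Integer inputRequestTimeoutMs) {
            Properties props = new Properties();
            long effective =
                    inputRequestTimeoutMs != null ? inputRequestTimeoutMs : defaultRequestTimeoutMs;
            props.setProperty(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(effective));
            return props;
        }

        public static void main(String[] args) {
            System.out.println(producerProperties(100, null)); // {request.timeout.ms=100}
            System.out.println(producerProperties(100, 200));  // {request.timeout.ms=200}
        }
    }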
diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTaskTest.java b/contribs/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTaskTest.java
deleted file mode 100644
index a6e0961204..0000000000
--- a/contribs/src/test/java/com/netflix/conductor/contribs/tasks/kafka/KafkaPublishTaskTest.java
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * Copyright 2022 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.contribs.tasks.kafka; - -import java.time.Duration; -import java.util.HashMap; -import java.util.Map; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; - -import org.apache.kafka.clients.producer.Producer; -import org.apache.kafka.common.serialization.IntegerSerializer; -import org.apache.kafka.common.serialization.LongSerializer; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.conductor.common.config.TestObjectMapperConfiguration; -import com.netflix.conductor.core.execution.WorkflowExecutor; -import com.netflix.conductor.model.TaskModel; -import com.netflix.conductor.model.WorkflowModel; - -import com.fasterxml.jackson.databind.ObjectMapper; - -import static org.junit.Assert.assertEquals; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -@SuppressWarnings({"unchecked", "rawtypes"}) -@ContextConfiguration(classes = {TestObjectMapperConfiguration.class}) -@RunWith(SpringRunner.class) -public class KafkaPublishTaskTest { - - @Autowired private ObjectMapper objectMapper; - - @Test - public void missingRequest_Fail() { - KafkaPublishTask kafkaPublishTask = - new KafkaPublishTask(getKafkaProducerManager(), objectMapper); - TaskModel task = new TaskModel(); - kafkaPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class)); - assertEquals(TaskModel.Status.FAILED, task.getStatus()); - } - - @Test - public void missingValue_Fail() { - - TaskModel task = new TaskModel(); - KafkaPublishTask.Input input = new KafkaPublishTask.Input(); - input.setBootStrapServers("localhost:9092"); - input.setTopic("testTopic"); - - task.getInputData().put(KafkaPublishTask.REQUEST_PARAMETER_NAME, input); - - KafkaPublishTask kPublishTask = - new KafkaPublishTask(getKafkaProducerManager(), objectMapper); - kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class)); - assertEquals(TaskModel.Status.FAILED, task.getStatus()); - } - - @Test - public void missingBootStrapServers_Fail() { - - TaskModel task = new TaskModel(); - KafkaPublishTask.Input input = new KafkaPublishTask.Input(); - - Map value = new HashMap<>(); - input.setValue(value); - input.setTopic("testTopic"); - - task.getInputData().put(KafkaPublishTask.REQUEST_PARAMETER_NAME, input); - - KafkaPublishTask kPublishTask = - new KafkaPublishTask(getKafkaProducerManager(), objectMapper); - kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class)); - assertEquals(TaskModel.Status.FAILED, task.getStatus()); - } - - @Test - public void kafkaPublishExecutionException_Fail() - throws ExecutionException, InterruptedException { - - TaskModel task = getTask(); - - KafkaProducerManager producerManager = mock(KafkaProducerManager.class); - KafkaPublishTask kafkaPublishTask = new KafkaPublishTask(producerManager, objectMapper); - - Producer producer = mock(Producer.class); - - 
when(producerManager.getProducer(any())).thenReturn(producer); - Future publishingFuture = mock(Future.class); - when(producer.send(any())).thenReturn(publishingFuture); - - ExecutionException executionException = mock(ExecutionException.class); - - when(executionException.getMessage()).thenReturn("Execution exception"); - when(publishingFuture.get()).thenThrow(executionException); - - kafkaPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class)); - assertEquals(TaskModel.Status.FAILED, task.getStatus()); - assertEquals( - "Failed to invoke kafka task due to: Execution exception", - task.getReasonForIncompletion()); - } - - @Test - public void kafkaPublishUnknownException_Fail() { - - TaskModel task = getTask(); - - KafkaProducerManager producerManager = mock(KafkaProducerManager.class); - KafkaPublishTask kPublishTask = new KafkaPublishTask(producerManager, objectMapper); - - Producer producer = mock(Producer.class); - - when(producerManager.getProducer(any())).thenReturn(producer); - when(producer.send(any())).thenThrow(new RuntimeException("Unknown exception")); - - kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class)); - assertEquals(TaskModel.Status.FAILED, task.getStatus()); - assertEquals( - "Failed to invoke kafka task due to: Unknown exception", - task.getReasonForIncompletion()); - } - - @Test - public void kafkaPublishSuccess_Completed() { - - TaskModel task = getTask(); - - KafkaProducerManager producerManager = mock(KafkaProducerManager.class); - KafkaPublishTask kPublishTask = new KafkaPublishTask(producerManager, objectMapper); - - Producer producer = mock(Producer.class); - - when(producerManager.getProducer(any())).thenReturn(producer); - when(producer.send(any())).thenReturn(mock(Future.class)); - - kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class)); - assertEquals(TaskModel.Status.COMPLETED, task.getStatus()); - } - - @Test - public void kafkaPublishSuccess_AsyncComplete() { - - TaskModel task = getTask(); - task.getInputData().put("asyncComplete", true); - - KafkaProducerManager producerManager = mock(KafkaProducerManager.class); - KafkaPublishTask kPublishTask = new KafkaPublishTask(producerManager, objectMapper); - - Producer producer = mock(Producer.class); - - when(producerManager.getProducer(any())).thenReturn(producer); - when(producer.send(any())).thenReturn(mock(Future.class)); - - kPublishTask.start(mock(WorkflowModel.class), task, mock(WorkflowExecutor.class)); - assertEquals(TaskModel.Status.IN_PROGRESS, task.getStatus()); - } - - private TaskModel getTask() { - TaskModel task = new TaskModel(); - KafkaPublishTask.Input input = new KafkaPublishTask.Input(); - input.setBootStrapServers("localhost:9092"); - - Map value = new HashMap<>(); - - value.put("input_key1", "value1"); - value.put("input_key2", 45.3d); - - input.setValue(value); - input.setTopic("testTopic"); - task.getInputData().put(KafkaPublishTask.REQUEST_PARAMETER_NAME, input); - return task; - } - - @Test - public void integerSerializer_integerObject() { - KafkaPublishTask kPublishTask = - new KafkaPublishTask(getKafkaProducerManager(), objectMapper); - KafkaPublishTask.Input input = new KafkaPublishTask.Input(); - input.setKeySerializer(IntegerSerializer.class.getCanonicalName()); - input.setKey(String.valueOf(Integer.MAX_VALUE)); - assertEquals(kPublishTask.getKey(input), Integer.MAX_VALUE); - } - - @Test - public void longSerializer_longObject() { - KafkaPublishTask kPublishTask = - new 
KafkaPublishTask(getKafkaProducerManager(), objectMapper); - KafkaPublishTask.Input input = new KafkaPublishTask.Input(); - input.setKeySerializer(LongSerializer.class.getCanonicalName()); - input.setKey(String.valueOf(Long.MAX_VALUE)); - assertEquals(kPublishTask.getKey(input), Long.MAX_VALUE); - } - - @Test - public void noSerializer_StringObject() { - KafkaPublishTask kPublishTask = - new KafkaPublishTask(getKafkaProducerManager(), objectMapper); - KafkaPublishTask.Input input = new KafkaPublishTask.Input(); - input.setKey("testStringKey"); - assertEquals(kPublishTask.getKey(input), "testStringKey"); - } - - private KafkaProducerManager getKafkaProducerManager() { - return new KafkaProducerManager( - Duration.ofMillis(100), Duration.ofMillis(500), 120000, Duration.ofMillis(10)); - } -} diff --git a/core/build.gradle b/core/build.gradle index 600b8f219e..53c7c7ca82 100644 --- a/core/build.gradle +++ b/core/build.gradle @@ -39,6 +39,8 @@ dependencies { implementation "org.apache.bval:bval-jsr:${revBval}" + implementation "com.github.ben-manes.caffeine:caffeine" + // JAXB is not bundled with Java 11, dependencies added explicitly // These are needed by Apache BVAL implementation "jakarta.xml.bind:jakarta.xml.bind-api:${revJAXB}" @@ -55,9 +57,3 @@ dependencies { testImplementation "org.spockframework:spock-core:${revSpock}" testImplementation "org.spockframework:spock-spring:${revSpock}" } - -test { - testLogging { - exceptionFormat = 'full' - } -} diff --git a/core/dependencies.lock b/core/dependencies.lock index 4dca2dbed9..bdf032d679 100644 --- a/core/dependencies.lock +++ b/core/dependencies.lock @@ -26,6 +26,15 @@ "org.hibernate.validator:hibernate-validator" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8" + }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "com.google.protobuf:protobuf-java": { "locked": "3.13.0" }, @@ -110,6 +119,12 @@ "org.apache.logging.log4j:log4j-web": { "locked": "2.17.1" }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "org.glassfish:jakarta.el": { "locked": "3.0.3", "transitive": [ @@ -249,6 +264,15 @@ "com.netflix.conductor:conductor-common" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8" + }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "com.google.protobuf:protobuf-java": { "locked": "3.13.0", "transitive": [ @@ -353,6 +377,12 @@ "com.netflix.conductor:conductor-common" ] }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "org.ow2.asm:asm": { "locked": "5.0.4", "transitive": [ @@ -390,6 +420,15 @@ "org.hibernate.validator:hibernate-validator" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8" + }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "com.google.protobuf:protobuf-java": { "locked": "3.13.0" }, @@ -572,6 +611,12 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "org.codehaus.groovy:groovy": { "locked": "2.5.14", "transitive": [ @@ -1067,6 +1112,15 @@ 
"org.hibernate.validator:hibernate-validator" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8" + }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "com.google.protobuf:protobuf-java": { "locked": "3.13.0", "transitive": [ @@ -1298,6 +1352,12 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "org.codehaus.groovy:groovy": { "locked": "2.5.14", "transitive": [ diff --git a/core/src/main/java/com/netflix/conductor/core/config/ConductorCoreConfiguration.java b/core/src/main/java/com/netflix/conductor/core/config/ConductorCoreConfiguration.java index 42f3fcd4c7..eedef68f1a 100644 --- a/core/src/main/java/com/netflix/conductor/core/config/ConductorCoreConfiguration.java +++ b/core/src/main/java/com/netflix/conductor/core/config/ConductorCoreConfiguration.java @@ -43,7 +43,7 @@ import com.netflix.conductor.core.listener.WorkflowStatusListenerStub; import com.netflix.conductor.core.storage.DummyPayloadStorage; import com.netflix.conductor.core.sync.Lock; -import com.netflix.conductor.core.sync.NoopLock; +import com.netflix.conductor.core.sync.noop.NoopLock; import static com.netflix.conductor.core.events.EventQueues.EVENT_QUEUE_PROVIDERS_QUALIFIER; import static com.netflix.conductor.core.execution.tasks.SystemTaskRegistry.ASYNC_SYSTEM_TASKS_QUALIFIER; diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/dao/index/NoopIndexDAO.java b/core/src/main/java/com/netflix/conductor/core/index/NoopIndexDAO.java similarity index 98% rename from contribs/src/main/java/com/netflix/conductor/contribs/dao/index/NoopIndexDAO.java rename to core/src/main/java/com/netflix/conductor/core/index/NoopIndexDAO.java index 4b5f22c6b2..4a7f427cbc 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/dao/index/NoopIndexDAO.java +++ b/core/src/main/java/com/netflix/conductor/core/index/NoopIndexDAO.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.dao.index; +package com.netflix.conductor.core.index; import java.util.Collections; import java.util.List; diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/dao/index/NoopIndexDAOConfiguration.java b/core/src/main/java/com/netflix/conductor/core/index/NoopIndexDAOConfiguration.java similarity index 95% rename from contribs/src/main/java/com/netflix/conductor/contribs/dao/index/NoopIndexDAOConfiguration.java rename to core/src/main/java/com/netflix/conductor/core/index/NoopIndexDAOConfiguration.java index b9feec8de2..0e9e2466b4 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/dao/index/NoopIndexDAOConfiguration.java +++ b/core/src/main/java/com/netflix/conductor/core/index/NoopIndexDAOConfiguration.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package com.netflix.conductor.contribs.dao.index; +package com.netflix.conductor.core.index; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.context.annotation.Bean; diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/lock/LocalOnlyLock.java b/core/src/main/java/com/netflix/conductor/core/sync/local/LocalOnlyLock.java similarity index 87% rename from contribs/src/main/java/com/netflix/conductor/contribs/lock/LocalOnlyLock.java rename to core/src/main/java/com/netflix/conductor/core/sync/local/LocalOnlyLock.java index 36cb86ecc7..ec5f6eec0c 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/lock/LocalOnlyLock.java +++ b/core/src/main/java/com/netflix/conductor/core/sync/local/LocalOnlyLock.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.lock; +package com.netflix.conductor.core.sync.local; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executors; @@ -23,12 +23,12 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.netflix.conductor.annotations.VisibleForTesting; import com.netflix.conductor.core.sync.Lock; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; +import com.github.benmanes.caffeine.cache.CacheLoader; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; public class LocalOnlyLock implements Lock { @@ -44,7 +44,7 @@ public Semaphore load(String key) { private static final ConcurrentHashMap> SCHEDULEDFUTURES = new ConcurrentHashMap<>(); private static final LoadingCache LOCKIDTOSEMAPHOREMAP = - CacheBuilder.newBuilder().build(LOADER); + Caffeine.newBuilder().build(LOADER); private static final ThreadGroup THREAD_GROUP = new ThreadGroup("LocalOnlyLock-scheduler"); private static final ThreadFactory THREAD_FACTORY = runnable -> new Thread(THREAD_GROUP, runnable); @@ -54,14 +54,14 @@ public Semaphore load(String key) { @Override public void acquireLock(String lockId) { LOGGER.trace("Locking {}", lockId); - LOCKIDTOSEMAPHOREMAP.getUnchecked(lockId).acquireUninterruptibly(); + LOCKIDTOSEMAPHOREMAP.get(lockId).acquireUninterruptibly(); } @Override public boolean acquireLock(String lockId, long timeToTry, TimeUnit unit) { try { LOGGER.trace("Locking {} with timeout {} {}", lockId, timeToTry, unit); - return LOCKIDTOSEMAPHOREMAP.getUnchecked(lockId).tryAcquire(timeToTry, unit); + return LOCKIDTOSEMAPHOREMAP.get(lockId).tryAcquire(timeToTry, unit); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new RuntimeException(e); @@ -100,9 +100,9 @@ public void releaseLock(String lockId) { // The check is here to prevent semaphore getting above 1 // e.g. 
in case when lease runs out but release is also called synchronized (LOCKIDTOSEMAPHOREMAP) { - if (LOCKIDTOSEMAPHOREMAP.getUnchecked(lockId).availablePermits() == 0) { + if (LOCKIDTOSEMAPHOREMAP.get(lockId).availablePermits() == 0) { LOGGER.trace("Releasing {}", lockId); - LOCKIDTOSEMAPHOREMAP.getUnchecked(lockId).release(); + LOCKIDTOSEMAPHOREMAP.get(lockId).release(); removeLeaseExpirationJob(lockId); } } diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/lock/LocalOnlyLockConfiguration.java b/core/src/main/java/com/netflix/conductor/core/sync/local/LocalOnlyLockConfiguration.java similarity index 95% rename from contribs/src/main/java/com/netflix/conductor/contribs/lock/LocalOnlyLockConfiguration.java rename to core/src/main/java/com/netflix/conductor/core/sync/local/LocalOnlyLockConfiguration.java index 431dc0de52..41a025406d 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/lock/LocalOnlyLockConfiguration.java +++ b/core/src/main/java/com/netflix/conductor/core/sync/local/LocalOnlyLockConfiguration.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.lock; +package com.netflix.conductor.core.sync.local; import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; import org.springframework.context.annotation.Bean; diff --git a/core/src/main/java/com/netflix/conductor/core/sync/NoopLock.java b/core/src/main/java/com/netflix/conductor/core/sync/noop/NoopLock.java similarity index 92% rename from core/src/main/java/com/netflix/conductor/core/sync/NoopLock.java rename to core/src/main/java/com/netflix/conductor/core/sync/noop/NoopLock.java index 912a23c429..5d492da1e6 100644 --- a/core/src/main/java/com/netflix/conductor/core/sync/NoopLock.java +++ b/core/src/main/java/com/netflix/conductor/core/sync/noop/NoopLock.java @@ -10,10 +10,12 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.core.sync; +package com.netflix.conductor.core.sync.noop; import java.util.concurrent.TimeUnit; +import com.netflix.conductor.core.sync.Lock; + public class NoopLock implements Lock { @Override diff --git a/core/src/main/resources/META-INF/additional-spring-configuration-metadata.json b/core/src/main/resources/META-INF/additional-spring-configuration-metadata.json index 4a58b5767d..b8d8114220 100644 --- a/core/src/main/resources/META-INF/additional-spring-configuration-metadata.json +++ b/core/src/main/resources/META-INF/additional-spring-configuration-metadata.json @@ -90,6 +90,17 @@ "description": "Enables the processor for the default event queues that conductor is configured to listen on.", "sourceType": "com.netflix.conductor.core.events.queue.DefaultEventQueueProcessor", "defaultValue": "true" + }, + { + "name": "conductor.workflow-status-listener.type", + "type": "java.lang.String", + "description": "The implementation of the workflow status listener to be used." 
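Paired with the `conductor.workflow-execution-lock.type` entry added immediately below (default `noop_lock`), this metadata makes both choices discoverable in IDEs. A hypothetical `application.properties` sketch, using only values from the hints added later in this hunk:

```
# Hypothetical sketch; keys and values come from the metadata added in this patch.
conductor.workflow-status-listener.type=stub
conductor.workflow-execution-lock.type=local_only
```

Here `local_only` selects the LocalOnlyLock relocated in the hunks above, while omitting the lock property falls back to the `noop_lock` default.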
+ }, + { + "name": "conductor.workflow-execution-lock.type", + "type": "java.lang.String", + "description": "The implementation of the workflow execution lock to be used.", + "defaultValue": "noop_lock" } ], "hints": [ @@ -99,18 +110,28 @@ { "value": "dummy", "description": "Use the dummy no-op implementation as the external payload storage." - }, + } + ] + }, + { + "name": "conductor.workflow-status-listener.type", + "values": [ { - "value": "azureblob", - "description": "Use Azure Blob as the external payload storage." - }, + "value": "stub", + "description": "Use the no-op implementation of the workflow status listener." + } + ] + }, + { + "name": "conductor.workflow-execution-lock.type", + "values": [ { - "value": "s3", - "description": "Use AWS S3 as the external payload storage." + "value": "noop_lock", + "description": "Use the no-op implementation as the lock provider." }, { - "value": "postgres", - "description": "Use PostgreSQL as the external payload storage." + "value": "local_only", + "description": "Use the local in-memory cache based implementation as the lock provider." } ] } diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/lock/LocalOnlyLockTest.java b/core/src/test/java/com/netflix/conductor/core/sync/local/LocalOnlyLockTest.java similarity index 84% rename from contribs/src/test/java/com/netflix/conductor/contribs/lock/LocalOnlyLockTest.java rename to core/src/test/java/com/netflix/conductor/core/sync/local/LocalOnlyLockTest.java index c081631b34..25105423bc 100644 --- a/contribs/src/test/java/com/netflix/conductor/contribs/lock/LocalOnlyLockTest.java +++ b/core/src/test/java/com/netflix/conductor/core/sync/local/LocalOnlyLockTest.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
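The LocalOnlyLock hunks above swap Guava's `CacheBuilder`/`LoadingCache` for Caffeine, and the test hunks just below follow suit (`getUnchecked` becomes `get`, `size()` becomes `estimatedSize()`). A minimal self-contained sketch of the substituted API, assuming the Caffeine 2.8.8 locked above; the class name is hypothetical:

```
import java.util.concurrent.Semaphore;

import com.github.benmanes.caffeine.cache.CacheLoader;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;

public class CaffeineSemaphoreSketch {

    // Caffeine's CacheLoader is a functional interface, so Guava's anonymous
    // loader subclass collapses to a lambda.
    private static final CacheLoader<String, Semaphore> LOADER = key -> new Semaphore(1);

    private static final LoadingCache<String, Semaphore> CACHE =
            Caffeine.newBuilder().build(LOADER);

    public static void main(String[] args) {
        // get(key) loads the value on first access, like Guava's getUnchecked(key).
        CACHE.get("wf-1").acquireUninterruptibly();
        CACHE.get("wf-1").release();

        // estimatedSize() is the analogue of Guava's size(); the count may lag
        // until cache maintenance runs, hence "estimated".
        System.out.println(CACHE.estimatedSize());
    }
}
```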
*/ -package com.netflix.conductor.contribs.lock; +package com.netflix.conductor.core.sync.local; import java.util.concurrent.TimeUnit; @@ -30,14 +30,14 @@ public class LocalOnlyLockTest { @Test public void testLockUnlock() { localOnlyLock.acquireLock("a", 100, 1000, TimeUnit.MILLISECONDS); - assertEquals(localOnlyLock.cache().size(), 1); - assertEquals(localOnlyLock.cache().getUnchecked("a").availablePermits(), 0); + assertEquals(localOnlyLock.cache().estimatedSize(), 1); + assertEquals(localOnlyLock.cache().get("a").availablePermits(), 0); assertEquals(localOnlyLock.scheduledFutures().size(), 1); localOnlyLock.releaseLock("a"); assertEquals(localOnlyLock.scheduledFutures().size(), 0); - assertEquals(localOnlyLock.cache().getUnchecked("a").availablePermits(), 1); + assertEquals(localOnlyLock.cache().get("a").availablePermits(), 1); localOnlyLock.deleteLock("a"); - assertEquals(localOnlyLock.cache().size(), 0); + assertEquals(localOnlyLock.cache().estimatedSize(), 0); } @Test(timeout = 10 * 1000) @@ -57,7 +57,7 @@ public void testLockLeaseTime() { localOnlyLock.acquireLock("a", 1000, 100, TimeUnit.MILLISECONDS); } localOnlyLock.acquireLock("a"); - assertEquals(0, localOnlyLock.cache().getUnchecked("a").availablePermits()); + assertEquals(0, localOnlyLock.cache().get("a").availablePermits()); localOnlyLock.releaseLock("a"); } @@ -70,7 +70,7 @@ public void testLockLeaseWithRelease() throws Exception { Thread.sleep(2000); localOnlyLock.acquireLock("b"); - assertEquals(0, localOnlyLock.cache().getUnchecked("b").availablePermits()); + assertEquals(0, localOnlyLock.cache().get("b").availablePermits()); localOnlyLock.releaseLock("b"); } @@ -78,7 +78,7 @@ public void testLockLeaseWithRelease() throws Exception { public void testRelease() { localOnlyLock.releaseLock("x54as4d2;23'4"); localOnlyLock.releaseLock("x54as4d2;23'4"); - assertEquals(1, localOnlyLock.cache().getUnchecked("x54as4d2;23'4").availablePermits()); + assertEquals(1, localOnlyLock.cache().get("x54as4d2;23'4").availablePermits()); } @Test diff --git a/dependencies.gradle b/dependencies.gradle index 213a6c0406..513ef25bc9 100644 --- a/dependencies.gradle +++ b/dependencies.gradle @@ -44,7 +44,7 @@ ext { revJsr311Api = '1.1.1' revKafka = '2.6.0' revMicrometer = '1.6.2' - revMockServerClient = '5.11.2' + revMockServerClient = '5.12.0' revNatsStreaming = '0.5.0' revOpenapi = '1.6.+' revPowerMock = '2.0.9' diff --git a/es6-persistence/dependencies.lock b/es6-persistence/dependencies.lock index 9945fa5cee..99aa8831cd 100644 --- a/es6-persistence/dependencies.lock +++ b/es6-persistence/dependencies.lock @@ -568,6 +568,12 @@ "org.elasticsearch:elasticsearch-x-content" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, "com.github.spullara.mustache.java:compiler": { "locked": "0.9.3", "transitive": [ @@ -581,8 +587,9 @@ ] }, "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", + "locked": "2.4.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava" ] }, @@ -927,8 +934,9 @@ ] }, "org.checkerframework:checker-qual": { - "locked": "3.5.0", + "locked": "3.8.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava" ] }, @@ -1915,6 +1923,12 @@ "org.elasticsearch:elasticsearch-x-content" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, "com.github.docker-java:docker-java-api": { "locked": 
"3.2.8", "transitive": [ @@ -1946,8 +1960,9 @@ ] }, "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", + "locked": "2.4.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava" ] }, @@ -2361,8 +2376,9 @@ "locked": "3.1.6" }, "org.checkerframework:checker-qual": { - "locked": "3.5.0", + "locked": "3.8.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava" ] }, diff --git a/es7-persistence/README.md b/es7-persistence/README.md deleted file mode 100644 index 8cf4fd7abf..0000000000 --- a/es7-persistence/README.md +++ /dev/null @@ -1,86 +0,0 @@ -# ES7 Persistence - -This module provides ES7 persistence when indexing workflows and tasks. - -## ES Breaking changes - -From ES6 to ES7 there were significant breaking changes which affected ES7-persistence module implementation. -* Mapping type deprecation -* Templates API -* TransportClient deprecation - -More information can be found here: https://www.elastic.co/guide/en/elasticsearch/reference/current/breaking-changes-7.0.html - - -## Build - -1. In order to use the ES7, you must change the following files from ES6 to ES7: - -https://github.com/Netflix/conductor/blob/main/build.gradle -https://github.com/Netflix/conductor/blob/main/server/src/main/resources/application.properties - -In file: - -- /build.gradle - -change ext['elasticsearch.version'] from revElasticSearch6 to revElasticSearch7 - - -In file: - -- /server/src/main/resources/application.properties - -change conductor.elasticsearch.version from 6 to 7 - -Also you need to recreate dependencies.lock files with ES7 dependencies. To do that delete all dependencies.lock files and then run: - -``` -./gradlew generateLock updateLock saveLock -``` - - -2. To use the ES7 for all modules include test-harness, you must change also the following files: - -https://github.com/Netflix/conductor/blob/main/test-harness/build.gradle -https://github.com/Netflix/conductor/blob/main/test-harness/src/test/java/com/netflix/conductor/test/integration/AbstractEndToEndTest.java - -In file: - -- /test-harness/build.gradle - -* change module inclusion from 'es6-persistence' to 'es7-persistence' - -In file: - -- /test-harness/src/test/java/com/netflix/conductor/test/integration/AbstractEndToEndTest.java - -* change conductor.elasticsearch.version from 6 to 7 -* change DockerImageName.parse("docker.elastic.co/elasticsearch/elasticsearch-oss").withTag("6.8.12") to DockerImageName.parse("docker.elastic.co/elasticsearch/elasticsearch-oss").withTag("7.6.2") - - -## Usage - -This module uses the following configuration options: - -* `conductor.elasticsearch.url` - A comma separated list of schema/host/port of the ES nodes to communicate with. -Schema can be `http` or `https`. If schema is ignored then `http` transport will be used; -Since ES deprecated TransportClient, conductor will use only the REST transport protocol. -* `conductor.elasticsearch.indexPrefix` - The name of the workflow and task index. 
-Defaults to `conductor` -* `conductor.elasticsearch.asyncWorkerQueueSize` - Worker queue size used in the executor service for async methods in IndexDao -Defaults to `100` -* `conductor.elasticsearch.asyncMaxPoolSize` - Maximum thread pool size in the executor service for async methods in IndexDao -Defaults to `12` -* `conductor.elasticsearch.asyncBufferFlushTimeout` - Timeout (in seconds) for the in-memory buffer to be flushed if not explicitly indexed -Defaults to `10` - -### BASIC Authentication -If you need to pass user/password to connect to ES, add the following properties to your config file: -* conductor.elasticsearch.username -* conductor.elasticsearch.password - -Example -``` -conductor.elasticsearch.username=someusername -conductor.elasticsearch.password=somepassword -``` diff --git a/es7-persistence/build.gradle b/es7-persistence/build.gradle deleted file mode 100644 index 793053deda..0000000000 --- a/es7-persistence/build.gradle +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2021 Netflix, Inc. - *
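The README removed above documented the module's configuration surface; a hypothetical `application.properties` sketch of those options, with example values only (key names and the 100/12/10 defaults come from that README):

```
# Example values only; keys are from the es7-persistence README removed above.
conductor.elasticsearch.url=http://es-host-1:9200,http://es-host-2:9200
conductor.elasticsearch.indexPrefix=conductor
conductor.elasticsearch.asyncWorkerQueueSize=100
conductor.elasticsearch.asyncMaxPoolSize=12
conductor.elasticsearch.asyncBufferFlushTimeout=10
# Optional basic auth, as described in the README:
conductor.elasticsearch.username=someusername
conductor.elasticsearch.password=somepassword
```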

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -plugins { - id 'com.github.johnrengelman.shadow' version '6.1.0' - id 'java' -} - -configurations { - // Prevent shaded dependencies from being published, while keeping them available to tests - shadow.extendsFrom compileOnly - testRuntime.extendsFrom compileOnly -} - -ext['elasticsearch.version'] = revElasticSearch7 - -dependencies { - implementation project(':conductor-common') - implementation project(':conductor-core') - - compileOnly 'org.springframework.boot:spring-boot-starter' - compileOnly 'org.springframework.retry:spring-retry' - - implementation "commons-io:commons-io:${revCommonsIo}" - implementation "org.apache.commons:commons-lang3" - // SBMTODO: remove guava dep - implementation "com.google.guava:guava:${revGuava}" - - implementation "com.fasterxml.jackson.core:jackson-databind" - implementation "com.fasterxml.jackson.core:jackson-core" - - implementation "org.elasticsearch.client:elasticsearch-rest-client" - implementation "org.elasticsearch.client:elasticsearch-rest-high-level-client" - - testImplementation 'org.springframework.retry:spring-retry' - testImplementation "org.awaitility:awaitility:${revAwaitility}" - testImplementation "org.testcontainers:elasticsearch:${revTestContainer}" - testImplementation project(':conductor-common').sourceSets.test.output -} - -// Drop the classifier and delete jar task actions to replace the regular jar artifact with the shadow artifact -shadowJar { - configurations = [project.configurations.shadow] - classifier = null - - // Service files are not included by default. 
- mergeServiceFiles { - include 'META-INF/services/*' - include 'META-INF/maven/*' - } -} - -jar.enabled = false -jar.dependsOn shadowJar diff --git a/es7-persistence/dependencies.lock b/es7-persistence/dependencies.lock deleted file mode 100644 index a929e007b8..0000000000 --- a/es7-persistence/dependencies.lock +++ /dev/null @@ -1,2671 +0,0 @@ -{ - "annotationProcessor": { - "org.springframework.boot:spring-boot-configuration-processor": { - "locked": "2.3.12.RELEASE" - } - }, - "compileClasspath": { - "com.carrotsearch:hppc": { - "locked": "0.8.1", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "com.fasterxml.jackson.core:jackson-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4" - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-smile": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.github.spullara.mustache.java:compiler": { - "locked": "0.9.6", - "transitive": [ - "org.elasticsearch.plugin:lang-mustache-client" - ] - }, - "com.google.code.findbugs:jsr305": { - "locked": "3.0.2", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:failureaccess": { - "locked": "1.0.1", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:guava": { - "locked": "30.0-jre" - }, - "com.google.guava:listenablefuture": { - "locked": "9999.0-empty-to-avoid-conflict-with-guava", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.j2objc:j2objc-annotations": { - "locked": "1.3", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.tdunning:t-digest": { - "locked": "3.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "commons-codec:commons-codec": { - "locked": "1.14", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "commons-io:commons-io": { - "locked": "2.7" - }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "joda-time:joda-time": { - "locked": "2.10.4", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "net.sf.jopt-simple:jopt-simple": { - "locked": "5.0.2", - "transitive": [ - "org.elasticsearch:elasticsearch-cli" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10" - }, - "org.apache.httpcomponents:httpasyncclient": { - "locked": "4.1.4", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.httpcomponents:httpclient": { - "locked": "4.5.13", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - 
"org.apache.httpcomponents:httpcore": { - "locked": "4.4.14", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.httpcomponents:httpcore-nio": { - "locked": "4.4.14", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - "org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web", - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1" - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1" - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1" - }, - "org.apache.lucene:lucene-analyzers-common": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-backward-codecs": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-core": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-grouping": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-highlighter": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-join": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-memory": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-misc": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-queries": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-queryparser": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-sandbox": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial-extras": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial3d": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-suggest": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.checkerframework:checker-qual": { - "locked": "3.5.0", - "transitive": [ - "com.google.guava:guava" - ] - }, - "org.elasticsearch.client:elasticsearch-rest-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.client:elasticsearch-rest-high-level-client": { - "locked": "7.6.2" - }, - "org.elasticsearch.plugin:aggs-matrix-stats-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:lang-mustache-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - 
"org.elasticsearch.plugin:mapper-extras-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:parent-join-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:rank-eval-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch:elasticsearch": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch:elasticsearch-cli": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-core": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch", - "org.elasticsearch:elasticsearch-cli", - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "org.elasticsearch:elasticsearch-geo": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-secure-sm": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-x-content": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:jna": { - "locked": "4.5.1", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.hdrhistogram:HdrHistogram": { - "locked": "2.1.9", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE" - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework.retry:spring-retry", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - 
"org.springframework:spring-context" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content", - "org.springframework.boot:spring-boot-starter" - ] - } - }, - "runtimeClasspath": { - "com.carrotsearch:hppc": { - "locked": "0.8.1", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "com.fasterxml.jackson.core:jackson-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.netflix.conductor:conductor-core" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-smile": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.github.spullara.mustache.java:compiler": { - "locked": "0.9.6", - "transitive": [ - "org.elasticsearch.plugin:lang-mustache-client" - ] - }, - "com.google.code.findbugs:jsr305": { - "locked": "3.0.2", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:failureaccess": { - "locked": "1.0.1", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:guava": { - "locked": "30.0-jre" - }, - "com.google.guava:listenablefuture": { - "locked": "9999.0-empty-to-avoid-conflict-with-guava", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.j2objc:j2objc-annotations": { - "locked": "1.3", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.protobuf:protobuf-java": { - "locked": "3.13.0", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "com.jayway.jsonpath:json-path": { - "locked": "2.4.0", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.netflix.conductor:conductor-annotations": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-common" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.spectator:spectator-api": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.spotify:completable-futures": { - "locked": "0.3.3", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.tdunning:t-digest": { - "locked": "3.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "commons-codec:commons-codec": { - "locked": "1.14", - "transitive": [ - 
"org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "commons-io:commons-io": { - "locked": "2.7", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "io.reactivex:rxjava": { - "locked": "1.3.8", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "jakarta.activation:jakarta.activation-api": { - "locked": "1.2.2", - "transitive": [ - "com.netflix.conductor:conductor-core", - "jakarta.xml.bind:jakarta.xml.bind-api" - ] - }, - "jakarta.xml.bind:jakarta.xml.bind-api": { - "locked": "2.3.3", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "joda-time:joda-time": { - "locked": "2.10.4", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "net.minidev:accessors-smart": { - "locked": "2.3.1", - "transitive": [ - "net.minidev:json-smart" - ] - }, - "net.minidev:json-smart": { - "locked": "2.3.1", - "transitive": [ - "com.jayway.jsonpath:json-path" - ] - }, - "net.sf.jopt-simple:jopt-simple": { - "locked": "5.0.2", - "transitive": [ - "org.elasticsearch:elasticsearch-cli" - ] - }, - "org.apache.bval:bval-jsr": { - "locked": "2.0.5", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.httpcomponents:httpasyncclient": { - "locked": "4.1.4", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.httpcomponents:httpclient": { - "locked": "4.5.13", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.httpcomponents:httpcore": { - "locked": "4.4.14", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.httpcomponents:httpcore-nio": { - "locked": "4.4.14", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web", - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.lucene:lucene-analyzers-common": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] 
- }, - "org.apache.lucene:lucene-backward-codecs": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-core": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-grouping": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-highlighter": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-join": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-memory": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-misc": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-queries": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-queryparser": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-sandbox": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial-extras": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial3d": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-suggest": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.checkerframework:checker-qual": { - "locked": "3.5.0", - "transitive": [ - "com.google.guava:guava" - ] - }, - "org.elasticsearch.client:elasticsearch-rest-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.client:elasticsearch-rest-high-level-client": { - "locked": "7.6.2" - }, - "org.elasticsearch.plugin:aggs-matrix-stats-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:lang-mustache-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:mapper-extras-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:parent-join-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:rank-eval-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch:elasticsearch": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch:elasticsearch-cli": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-core": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch", - "org.elasticsearch:elasticsearch-cli", - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "org.elasticsearch:elasticsearch-geo": { - "locked": "7.6.2", - "transitive": [ - 
"org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-secure-sm": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-x-content": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:jna": { - "locked": "4.5.1", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.hdrhistogram:HdrHistogram": { - "locked": "2.1.9", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.ow2.asm:asm": { - "locked": "5.0.4", - "transitive": [ - "net.minidev:accessors-smart" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "com.jayway.jsonpath:json-path", - "com.netflix.spectator:spectator-api", - "org.apache.logging.log4j:log4j-slf4j-impl" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - } - }, - "shadow": { - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "org.slf4j:jul-to-slf4j" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE" - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework.retry:spring-retry", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - } - }, - "testCompileClasspath": { - "com.carrotsearch:hppc": { - "locked": "0.8.1", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "com.fasterxml.jackson.core:jackson-annotations": { - "locked": "2.11.4", - "transitive": [ - 
"com.fasterxml.jackson.core:jackson-databind", - "com.github.docker-java:docker-java-api" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4" - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-smile": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.github.docker-java:docker-java-api": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.github.docker-java:docker-java-transport": { - "locked": "3.2.8", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep" - ] - }, - "com.github.docker-java:docker-java-transport-zerodep": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.github.spullara.mustache.java:compiler": { - "locked": "0.9.6", - "transitive": [ - "org.elasticsearch.plugin:lang-mustache-client" - ] - }, - "com.google.code.findbugs:jsr305": { - "locked": "3.0.2", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:failureaccess": { - "locked": "1.0.1", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:guava": { - "locked": "30.0-jre" - }, - "com.google.guava:listenablefuture": { - "locked": "9999.0-empty-to-avoid-conflict-with-guava", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.j2objc:j2objc-annotations": { - "locked": "1.3", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.jayway.jsonpath:json-path": { - "locked": "2.4.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.tdunning:t-digest": { - "locked": "3.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "com.vaadin.external.google:android-json": { - "locked": "0.0.20131108.vaadin1", - "transitive": [ - "org.skyscreamer:jsonassert" - ] - }, - "commons-codec:commons-codec": { - "locked": "1.14", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "commons-io:commons-io": { - "locked": "2.7" - }, - "jakarta.activation:jakarta.activation-api": { - "locked": "1.2.2", - "transitive": [ - "jakarta.xml.bind:jakarta.xml.bind-api" - ] - }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "jakarta.xml.bind:jakarta.xml.bind-api": { - "locked": "2.3.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "joda-time:joda-time": { - "locked": "2.10.4", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "junit:junit": { - "locked": "4.13.2", - "transitive": [ - "org.junit.vintage:junit-vintage-engine", - "org.testcontainers:testcontainers" - ] - }, - "net.bytebuddy:byte-buddy": { - 
"locked": "1.10.22", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "net.bytebuddy:byte-buddy-agent": { - "locked": "1.10.22", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "net.java.dev.jna:jna": { - "locked": "5.8.0", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep", - "org.rnorth.visible-assertions:visible-assertions" - ] - }, - "net.minidev:accessors-smart": { - "locked": "2.3.1", - "transitive": [ - "net.minidev:json-smart" - ] - }, - "net.minidev:json-smart": { - "locked": "2.3.1", - "transitive": [ - "com.jayway.jsonpath:json-path" - ] - }, - "net.sf.jopt-simple:jopt-simple": { - "locked": "5.0.2", - "transitive": [ - "org.elasticsearch:elasticsearch-cli" - ] - }, - "org.apache.commons:commons-compress": { - "locked": "1.20", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10" - }, - "org.apache.httpcomponents:httpasyncclient": { - "locked": "4.1.4", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.httpcomponents:httpclient": { - "locked": "4.5.13", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.httpcomponents:httpcore": { - "locked": "4.4.14", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.httpcomponents:httpcore-nio": { - "locked": "4.4.14", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - "org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web", - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "org.apache.logging.log4j:log4j-web", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1" - }, - "org.apache.lucene:lucene-analyzers-common": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-backward-codecs": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-core": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-grouping": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-highlighter": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-join": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-memory": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-misc": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-queries": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-queryparser": { - "locked": 
"8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-sandbox": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial-extras": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial3d": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-suggest": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apiguardian:apiguardian-api": { - "locked": "1.1.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.assertj:assertj-core": { - "locked": "3.16.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.awaitility:awaitility": { - "locked": "3.1.6" - }, - "org.checkerframework:checker-qual": { - "locked": "3.5.0", - "transitive": [ - "com.google.guava:guava" - ] - }, - "org.elasticsearch.client:elasticsearch-rest-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.client:elasticsearch-rest-high-level-client": { - "locked": "7.6.2" - }, - "org.elasticsearch.plugin:aggs-matrix-stats-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:lang-mustache-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:mapper-extras-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:parent-join-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:rank-eval-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch:elasticsearch": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch:elasticsearch-cli": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-core": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch", - "org.elasticsearch:elasticsearch-cli", - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "org.elasticsearch:elasticsearch-geo": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-secure-sm": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-x-content": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:jna": { - "locked": "4.5.1", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.hamcrest:hamcrest": { - "locked": "2.2", - "transitive": [ - "org.hamcrest:hamcrest-core", - 
"org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.hamcrest:hamcrest-core": { - "locked": "2.2", - "transitive": [ - "junit:junit", - "org.awaitility:awaitility", - "org.hamcrest:hamcrest-library" - ] - }, - "org.hamcrest:hamcrest-library": { - "locked": "2.2", - "transitive": [ - "org.awaitility:awaitility" - ] - }, - "org.hdrhistogram:HdrHistogram": { - "locked": "2.1.9", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.junit.jupiter:junit-jupiter": { - "locked": "5.6.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit.jupiter:junit-jupiter-api": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-params" - ] - }, - "org.junit.jupiter:junit-jupiter-params": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter" - ] - }, - "org.junit.platform:junit-platform-commons": { - "locked": "1.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.platform:junit-platform-engine" - ] - }, - "org.junit.platform:junit-platform-engine": { - "locked": "1.6.3", - "transitive": [ - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.junit.vintage:junit-vintage-engine": { - "locked": "5.6.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit:junit-bom": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.mockito:mockito-core": { - "locked": "3.3.3", - "transitive": [ - "org.mockito:mockito-junit-jupiter", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.mockito:mockito-junit-jupiter": { - "locked": "3.3.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.objenesis:objenesis": { - "locked": "2.6", - "transitive": [ - "org.awaitility:awaitility", - "org.mockito:mockito-core" - ] - }, - "org.opentest4j:opentest4j": { - "locked": "1.2.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.platform:junit-platform-engine" - ] - }, - "org.ow2.asm:asm": { - "locked": "5.0.4", - "transitive": [ - "net.minidev:accessors-smart" - ] - }, - "org.rnorth.duct-tape:duct-tape": { - "locked": "1.0.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.rnorth.visible-assertions:visible-assertions": { - "locked": "2.1.2", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.skyscreamer:jsonassert": { - "locked": "1.5.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2", - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "com.github.docker-java:docker-java-api", - "com.github.docker-java:docker-java-transport-zerodep", - "com.jayway.jsonpath:json-path", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j", - "org.testcontainers:testcontainers" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter", - 
"org.springframework.boot:spring-boot-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework.boot:spring-boot-starter-log4j2": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter-test": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-test": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-test-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE" - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-starter-test", - "org.springframework.retry:spring-retry", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression", - "org.springframework:spring-test" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.springframework:spring-test": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.testcontainers:elasticsearch": { - "locked": "1.15.3" - }, - "org.testcontainers:testcontainers": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:elasticsearch" - ] - }, - "org.xmlunit:xmlunit-core": { - "locked": "2.7.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content", - "org.springframework.boot:spring-boot-starter" - ] - } - }, - "testRuntime": { - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - 
"org.slf4j:jul-to-slf4j" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE" - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework.retry:spring-retry", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - } - }, - "testRuntimeClasspath": { - "com.carrotsearch:hppc": { - "locked": "0.8.1", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "com.fasterxml.jackson.core:jackson-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.github.docker-java:docker-java-api", - "com.netflix.conductor:conductor-core" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-smile": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": { - "locked": "2.11.4", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "com.github.docker-java:docker-java-api": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.github.docker-java:docker-java-transport": { - "locked": "3.2.8", - "transitive": [ - 
"com.github.docker-java:docker-java-transport-zerodep" - ] - }, - "com.github.docker-java:docker-java-transport-zerodep": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.github.spullara.mustache.java:compiler": { - "locked": "0.9.6", - "transitive": [ - "org.elasticsearch.plugin:lang-mustache-client" - ] - }, - "com.google.code.findbugs:jsr305": { - "locked": "3.0.2", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:failureaccess": { - "locked": "1.0.1", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:guava": { - "locked": "30.0-jre" - }, - "com.google.guava:listenablefuture": { - "locked": "9999.0-empty-to-avoid-conflict-with-guava", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.j2objc:j2objc-annotations": { - "locked": "1.3", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.protobuf:protobuf-java": { - "locked": "3.13.0", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "com.jayway.jsonpath:json-path": { - "locked": "2.4.0", - "transitive": [ - "com.netflix.conductor:conductor-core", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "com.netflix.conductor:conductor-annotations": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-common" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.spectator:spectator-api": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.spotify:completable-futures": { - "locked": "0.3.3", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.tdunning:t-digest": { - "locked": "3.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "com.vaadin.external.google:android-json": { - "locked": "0.0.20131108.vaadin1", - "transitive": [ - "org.skyscreamer:jsonassert" - ] - }, - "commons-codec:commons-codec": { - "locked": "1.14", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "commons-io:commons-io": { - "locked": "2.7", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "io.reactivex:rxjava": { - "locked": "1.3.8", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "jakarta.activation:jakarta.activation-api": { - "locked": "1.2.2", - "transitive": [ - "com.netflix.conductor:conductor-core", - "jakarta.xml.bind:jakarta.xml.bind-api" - ] - }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "jakarta.xml.bind:jakarta.xml.bind-api": { - "locked": "2.3.3", - "transitive": [ - "com.netflix.conductor:conductor-core", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "joda-time:joda-time": { - "locked": "2.10.4", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "junit:junit": { - "locked": "4.13.2", - "transitive": [ - "org.junit.vintage:junit-vintage-engine", - "org.testcontainers:testcontainers" - ] - }, - "net.bytebuddy:byte-buddy": { - "locked": "1.10.22", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "net.bytebuddy:byte-buddy-agent": { - 
"locked": "1.10.22", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "net.java.dev.jna:jna": { - "locked": "5.8.0", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep", - "org.rnorth.visible-assertions:visible-assertions" - ] - }, - "net.minidev:accessors-smart": { - "locked": "2.3.1", - "transitive": [ - "net.minidev:json-smart" - ] - }, - "net.minidev:json-smart": { - "locked": "2.3.1", - "transitive": [ - "com.jayway.jsonpath:json-path" - ] - }, - "net.sf.jopt-simple:jopt-simple": { - "locked": "5.0.2", - "transitive": [ - "org.elasticsearch:elasticsearch-cli" - ] - }, - "org.apache.bval:bval-jsr": { - "locked": "2.0.5", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.commons:commons-compress": { - "locked": "1.20", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.httpcomponents:httpasyncclient": { - "locked": "4.1.4", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.httpcomponents:httpclient": { - "locked": "4.5.13", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.httpcomponents:httpcore": { - "locked": "4.4.14", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.httpcomponents:httpcore-nio": { - "locked": "4.4.14", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-client" - ] - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web", - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.lucene:lucene-analyzers-common": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-backward-codecs": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - 
"org.apache.lucene:lucene-core": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-grouping": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-highlighter": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-join": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-memory": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-misc": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-queries": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-queryparser": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-sandbox": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial-extras": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-spatial3d": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apache.lucene:lucene-suggest": { - "locked": "8.4.0", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.apiguardian:apiguardian-api": { - "locked": "1.1.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.assertj:assertj-core": { - "locked": "3.16.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.awaitility:awaitility": { - "locked": "3.1.6" - }, - "org.checkerframework:checker-qual": { - "locked": "3.5.0", - "transitive": [ - "com.google.guava:guava" - ] - }, - "org.elasticsearch.client:elasticsearch-rest-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.client:elasticsearch-rest-high-level-client": { - "locked": "7.6.2" - }, - "org.elasticsearch.plugin:aggs-matrix-stats-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:lang-mustache-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:mapper-extras-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:parent-join-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch.plugin:rank-eval-client": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - "org.elasticsearch:elasticsearch": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch.client:elasticsearch-rest-high-level-client" - ] - }, - 
"org.elasticsearch:elasticsearch-cli": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-core": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch", - "org.elasticsearch:elasticsearch-cli", - "org.elasticsearch:elasticsearch-x-content" - ] - }, - "org.elasticsearch:elasticsearch-geo": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-secure-sm": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:elasticsearch-x-content": { - "locked": "7.6.2", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.elasticsearch:jna": { - "locked": "4.5.1", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.hamcrest:hamcrest": { - "locked": "2.2", - "transitive": [ - "org.hamcrest:hamcrest-core", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.hamcrest:hamcrest-core": { - "locked": "2.2", - "transitive": [ - "junit:junit", - "org.awaitility:awaitility", - "org.hamcrest:hamcrest-library" - ] - }, - "org.hamcrest:hamcrest-library": { - "locked": "2.2", - "transitive": [ - "org.awaitility:awaitility" - ] - }, - "org.hdrhistogram:HdrHistogram": { - "locked": "2.1.9", - "transitive": [ - "org.elasticsearch:elasticsearch" - ] - }, - "org.junit.jupiter:junit-jupiter": { - "locked": "5.6.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit.jupiter:junit-jupiter-api": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.jupiter:junit-jupiter-params", - "org.mockito:mockito-junit-jupiter" - ] - }, - "org.junit.jupiter:junit-jupiter-engine": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter" - ] - }, - "org.junit.jupiter:junit-jupiter-params": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter" - ] - }, - "org.junit.platform:junit-platform-commons": { - "locked": "1.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.platform:junit-platform-engine" - ] - }, - "org.junit.platform:junit-platform-engine": { - "locked": "1.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.junit.vintage:junit-vintage-engine": { - "locked": "5.6.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit:junit-bom": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.mockito:mockito-core": { - "locked": "3.3.3", - "transitive": [ - "org.mockito:mockito-junit-jupiter", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.mockito:mockito-junit-jupiter": { - "locked": "3.3.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.objenesis:objenesis": { - "locked": "2.6", - "transitive": [ - "org.awaitility:awaitility", - "org.mockito:mockito-core" - ] - }, - "org.opentest4j:opentest4j": { - "locked": "1.2.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - 
"org.junit.platform:junit-platform-engine" - ] - }, - "org.ow2.asm:asm": { - "locked": "5.0.4", - "transitive": [ - "net.minidev:accessors-smart" - ] - }, - "org.rnorth.duct-tape:duct-tape": { - "locked": "1.0.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.rnorth.visible-assertions:visible-assertions": { - "locked": "2.1.2", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.skyscreamer:jsonassert": { - "locked": "1.5.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2", - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "com.github.docker-java:docker-java-api", - "com.github.docker-java:docker-java-transport-zerodep", - "com.jayway.jsonpath:json-path", - "com.netflix.spectator:spectator-api", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j", - "org.testcontainers:testcontainers" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework.boot:spring-boot-starter-log4j2": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter-test": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-test": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-test-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE" - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-starter-test", - "org.springframework.retry:spring-retry", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression", - 
"org.springframework:spring-test" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.springframework:spring-test": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.testcontainers:elasticsearch": { - "locked": "1.15.3" - }, - "org.testcontainers:testcontainers": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:elasticsearch" - ] - }, - "org.xmlunit:xmlunit-core": { - "locked": "2.7.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "org.elasticsearch:elasticsearch-x-content", - "org.springframework.boot:spring-boot-starter" - ] - } - } -} \ No newline at end of file diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchConditions.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchConditions.java deleted file mode 100644 index 8a41791397..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchConditions.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.config; - -import org.springframework.boot.autoconfigure.condition.AllNestedConditions; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; - -public class ElasticSearchConditions { - - private ElasticSearchConditions() {} - - public static class ElasticSearchV7Enabled extends AllNestedConditions { - - ElasticSearchV7Enabled() { - super(ConfigurationPhase.PARSE_CONFIGURATION); - } - - @SuppressWarnings("unused") - @ConditionalOnProperty( - name = "conductor.indexing.enabled", - havingValue = "true", - matchIfMissing = true) - static class enabledIndexing {} - - @SuppressWarnings("unused") - @ConditionalOnProperty( - name = "conductor.elasticsearch.version", - havingValue = "7", - matchIfMissing = true) - static class enabledES7 {} - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchProperties.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchProperties.java deleted file mode 100644 index 8b15d30b51..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchProperties.java +++ /dev/null @@ -1,228 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
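The ElasticSearchV7Enabled condition above only matches when both nested @ConditionalOnProperty checks pass, i.e. conductor.indexing.enabled=true and conductor.elasticsearch.version=7 (both default to matching when unset). A minimal sketch of how that gating behaves under test, assuming Spring Boot's ApplicationContextRunner, JUnit 4's assertFalse, and same-package access to the ElasticSearchV7Configuration class deleted later in this patch:

import static org.junit.Assert.assertFalse;

import org.springframework.boot.test.context.runner.ApplicationContextRunner;

class ElasticSearchV7EnabledSketch {

    void conditionRejectsOtherVersions() {
        new ApplicationContextRunner()
                // the configuration class guarded by ElasticSearchV7Enabled
                .withUserConfiguration(ElasticSearchV7Configuration.class)
                .withPropertyValues(
                        "conductor.indexing.enabled=true",
                        "conductor.elasticsearch.version=6") // not "7", so AllNestedConditions fails
                .run(context -> assertFalse(context.containsBean("restClientBuilder")));
    }
}

With version set to 7 (or left unset), the same runner would process the configuration and register the RestClient beans.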

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.config; - -import java.net.MalformedURLException; -import java.net.URL; -import java.time.Duration; -import java.time.temporal.ChronoUnit; -import java.util.Arrays; -import java.util.List; -import java.util.stream.Collectors; - -import org.springframework.boot.context.properties.ConfigurationProperties; -import org.springframework.boot.convert.DurationUnit; - -@ConfigurationProperties("conductor.elasticsearch") -public class ElasticSearchProperties { - - /** - * The comma separated list of urls for the elasticsearch cluster. Format -- - * host1:port1,host2:port2 - */ - private String url = "localhost:9300"; - - /** The index prefix to be used when creating indices */ - private String indexPrefix = "conductor"; - - /** The color of the elasticsearch cluster to wait for to confirm healthy status */ - private String clusterHealthColor = "green"; - - /** The size of the batch to be used for bulk indexing in async mode */ - private int indexBatchSize = 1; - - /** The size of the queue used for holding async indexing tasks */ - private int asyncWorkerQueueSize = 100; - - /** The maximum number of threads allowed in the async pool */ - private int asyncMaxPoolSize = 12; - - /** - * The time in seconds after which the async buffers will be flushed (if no activity) to prevent - * data loss - */ - @DurationUnit(ChronoUnit.SECONDS) - private Duration asyncBufferFlushTimeout = Duration.ofSeconds(10); - - /** The number of shards that the index will be created with */ - private int indexShardCount = 5; - - /** The number of replicas that the index will be configured to have */ - private int indexReplicasCount = 1; - - /** The number of task log results that will be returned in the response */ - private int taskLogResultLimit = 10; - - /** The timeout in milliseconds used when requesting a connection from the connection manager */ - private int restClientConnectionRequestTimeout = -1; - - /** Used to control if index management is to be enabled or will be controlled externally */ - private boolean autoIndexManagementEnabled = true; - - /** - * Document types are deprecated in ES6 and removed from ES7. This property can be used to - * disable the use of specific document types with an override. This property is currently used - * in ES6 module. - * - *

Note that this property will only take effect if {@link - * ElasticSearchProperties#isAutoIndexManagementEnabled} is set to false and index management is - * handled outside of this module. - */ - private String documentTypeOverride = ""; - - /** Elasticsearch basic auth username */ - private String username; - - /** Elasticsearch basic auth password */ - private String password; - - public String getUrl() { - return url; - } - - public void setUrl(String url) { - this.url = url; - } - - public String getIndexPrefix() { - return indexPrefix; - } - - public void setIndexPrefix(String indexPrefix) { - this.indexPrefix = indexPrefix; - } - - public String getClusterHealthColor() { - return clusterHealthColor; - } - - public void setClusterHealthColor(String clusterHealthColor) { - this.clusterHealthColor = clusterHealthColor; - } - - public int getIndexBatchSize() { - return indexBatchSize; - } - - public void setIndexBatchSize(int indexBatchSize) { - this.indexBatchSize = indexBatchSize; - } - - public int getAsyncWorkerQueueSize() { - return asyncWorkerQueueSize; - } - - public void setAsyncWorkerQueueSize(int asyncWorkerQueueSize) { - this.asyncWorkerQueueSize = asyncWorkerQueueSize; - } - - public int getAsyncMaxPoolSize() { - return asyncMaxPoolSize; - } - - public void setAsyncMaxPoolSize(int asyncMaxPoolSize) { - this.asyncMaxPoolSize = asyncMaxPoolSize; - } - - public Duration getAsyncBufferFlushTimeout() { - return asyncBufferFlushTimeout; - } - - public void setAsyncBufferFlushTimeout(Duration asyncBufferFlushTimeout) { - this.asyncBufferFlushTimeout = asyncBufferFlushTimeout; - } - - public int getIndexShardCount() { - return indexShardCount; - } - - public void setIndexShardCount(int indexShardCount) { - this.indexShardCount = indexShardCount; - } - - public int getIndexReplicasCount() { - return indexReplicasCount; - } - - public void setIndexReplicasCount(int indexReplicasCount) { - this.indexReplicasCount = indexReplicasCount; - } - - public int getTaskLogResultLimit() { - return taskLogResultLimit; - } - - public void setTaskLogResultLimit(int taskLogResultLimit) { - this.taskLogResultLimit = taskLogResultLimit; - } - - public int getRestClientConnectionRequestTimeout() { - return restClientConnectionRequestTimeout; - } - - public void setRestClientConnectionRequestTimeout(int restClientConnectionRequestTimeout) { - this.restClientConnectionRequestTimeout = restClientConnectionRequestTimeout; - } - - public boolean isAutoIndexManagementEnabled() { - return autoIndexManagementEnabled; - } - - public void setAutoIndexManagementEnabled(boolean autoIndexManagementEnabled) { - this.autoIndexManagementEnabled = autoIndexManagementEnabled; - } - - public String getDocumentTypeOverride() { - return documentTypeOverride; - } - - public void setDocumentTypeOverride(String documentTypeOverride) { - this.documentTypeOverride = documentTypeOverride; - } - - public String getUsername() { - return username; - } - - public void setUsername(String username) { - this.username = username; - } - - public String getPassword() { - return password; - } - - public void setPassword(String password) { - this.password = password; - } - - public List toURLs() { - String clusterAddress = getUrl(); - String[] hosts = clusterAddress.split(","); - return Arrays.stream(hosts) - .map( - host -> - (host.startsWith("http://") || host.startsWith("https://")) - ? 
toURL(host) - : toURL("http://" + host)) - .collect(Collectors.toList()); - } - - private URL toURL(String url) { - try { - return new URL(url); - } catch (MalformedURLException e) { - throw new IllegalArgumentException(url + " cannot be converted to java.net.URL"); - } - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchV7Configuration.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchV7Configuration.java deleted file mode 100644 index 6aab66f0d8..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/config/ElasticSearchV7Configuration.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
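toURLs() above turns the comma-separated conductor.elasticsearch.url value into java.net.URL objects, prepending http:// when a host carries no scheme. A self-contained sketch of the same parsing; the class name and sample hosts are illustrative:

import java.net.MalformedURLException;
import java.net.URL;
import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

class ClusterUrlSketch {

    public static void main(String[] args) {
        // sample value of conductor.elasticsearch.url
        String clusterAddress = "host1:9201,https://host2:9202";
        List<URL> urls =
                Arrays.stream(clusterAddress.split(","))
                        .map(host ->
                                toURL(host.startsWith("http://") || host.startsWith("https://")
                                        ? host
                                        : "http://" + host)) // default scheme, as in toURLs()
                        .collect(Collectors.toList());
        System.out.println(urls); // [http://host1:9201, https://host2:9202]
    }

    private static URL toURL(String url) {
        try {
            return new URL(url);
        } catch (MalformedURLException e) {
            throw new IllegalArgumentException(url + " cannot be converted to java.net.URL", e);
        }
    }
}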

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.config; - -import java.net.URL; -import java.util.List; - -import org.apache.http.HttpHost; -import org.apache.http.auth.AuthScope; -import org.apache.http.auth.UsernamePasswordCredentials; -import org.apache.http.client.CredentialsProvider; -import org.apache.http.impl.client.BasicCredentialsProvider; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestClientBuilder; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Conditional; -import org.springframework.context.annotation.Configuration; -import org.springframework.retry.backoff.FixedBackOffPolicy; -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.dao.IndexDAO; -import com.netflix.conductor.es7.dao.index.ElasticSearchRestDAOV7; - -import com.fasterxml.jackson.databind.ObjectMapper; - -@Configuration(proxyBeanMethods = false) -@EnableConfigurationProperties(ElasticSearchProperties.class) -@Conditional(ElasticSearchConditions.ElasticSearchV7Enabled.class) -public class ElasticSearchV7Configuration { - - private static final Logger log = LoggerFactory.getLogger(ElasticSearchV7Configuration.class); - - @Bean - public RestClient restClient(ElasticSearchProperties properties) { - RestClientBuilder restClientBuilder = - RestClient.builder(convertToHttpHosts(properties.toURLs())); - if (properties.getRestClientConnectionRequestTimeout() > 0) { - restClientBuilder.setRequestConfigCallback( - requestConfigBuilder -> - requestConfigBuilder.setConnectionRequestTimeout( - properties.getRestClientConnectionRequestTimeout())); - } - return restClientBuilder.build(); - } - - @Bean - public RestClientBuilder restClientBuilder(ElasticSearchProperties properties) { - RestClientBuilder builder = RestClient.builder(convertToHttpHosts(properties.toURLs())); - - if (properties.getUsername() != null && properties.getPassword() != null) { - log.info( - "Configure ElasticSearch with BASIC authentication. 
User:{}", - properties.getUsername()); - final CredentialsProvider credentialsProvider = new BasicCredentialsProvider(); - credentialsProvider.setCredentials( - AuthScope.ANY, - new UsernamePasswordCredentials( - properties.getUsername(), properties.getPassword())); - builder.setHttpClientConfigCallback( - httpClientBuilder -> - httpClientBuilder.setDefaultCredentialsProvider(credentialsProvider)); - } else { - log.info("Configure ElasticSearch with no authentication."); - } - return builder; - } - - @Bean - public IndexDAO es7IndexDAO( - RestClientBuilder restClientBuilder, - @Qualifier("es7RetryTemplate") RetryTemplate retryTemplate, - ElasticSearchProperties properties, - ObjectMapper objectMapper) { - String url = properties.getUrl(); - return new ElasticSearchRestDAOV7( - restClientBuilder, retryTemplate, properties, objectMapper); - } - - @Bean - public RetryTemplate es7RetryTemplate() { - RetryTemplate retryTemplate = new RetryTemplate(); - FixedBackOffPolicy fixedBackOffPolicy = new FixedBackOffPolicy(); - fixedBackOffPolicy.setBackOffPeriod(1000L); - retryTemplate.setBackOffPolicy(fixedBackOffPolicy); - return retryTemplate; - } - - private HttpHost[] convertToHttpHosts(List hosts) { - return hosts.stream() - .map(host -> new HttpHost(host.getHost(), host.getPort(), host.getProtocol())) - .toArray(HttpHost[]::new); - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestBuilderWrapper.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestBuilderWrapper.java deleted file mode 100644 index b046d3eb61..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestBuilderWrapper.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.index; - -import java.util.Objects; - -import org.elasticsearch.action.ActionFuture; -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.bulk.BulkResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.springframework.lang.NonNull; - -/** Thread-safe wrapper for {@link BulkRequestBuilder}. */ -public class BulkRequestBuilderWrapper { - private final BulkRequestBuilder bulkRequestBuilder; - - public BulkRequestBuilderWrapper(@NonNull BulkRequestBuilder bulkRequestBuilder) { - this.bulkRequestBuilder = Objects.requireNonNull(bulkRequestBuilder); - } - - public void add(@NonNull UpdateRequest req) { - synchronized (bulkRequestBuilder) { - bulkRequestBuilder.add(Objects.requireNonNull(req)); - } - } - - public void add(@NonNull IndexRequest req) { - synchronized (bulkRequestBuilder) { - bulkRequestBuilder.add(Objects.requireNonNull(req)); - } - } - - public int numberOfActions() { - synchronized (bulkRequestBuilder) { - return bulkRequestBuilder.numberOfActions(); - } - } - - public ActionFuture execute() { - synchronized (bulkRequestBuilder) { - return bulkRequestBuilder.execute(); - } - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestWrapper.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestWrapper.java deleted file mode 100644 index 38d38d3d61..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/BulkRequestWrapper.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
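BulkRequestBuilderWrapper above, like the BulkRequestWrapper defined next, serializes all access by synchronizing on the wrapped request, so several indexing threads can append to a single batch safely. A sketch of concurrent use, assuming a caller in the same package (the wrapper members are package-private) and the BulkRequestWrapper just below, since it wraps a plain BulkRequest that can be constructed directly:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.xcontent.XContentType;

class BulkWrapperConcurrencySketch {

    public static void main(String[] args) throws InterruptedException {
        BulkRequestWrapper wrapper = new BulkRequestWrapper(new BulkRequest());
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < 100; i++) {
            String id = String.valueOf(i);
            // add(...) synchronizes on the wrapped request, so concurrent adds are safe
            pool.execute(() -> wrapper.add(
                    new IndexRequest("conductor_task")
                            .id(id)
                            .source("{\"taskId\":\"" + id + "\"}", XContentType.JSON)));
        }
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
        System.out.println(wrapper.numberOfActions()); // 100
    }
}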

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.index; - -import java.util.Objects; - -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.springframework.lang.NonNull; - -/** Thread-safe wrapper for {@link BulkRequest}. */ -class BulkRequestWrapper { - private final BulkRequest bulkRequest; - - BulkRequestWrapper(@NonNull BulkRequest bulkRequest) { - this.bulkRequest = Objects.requireNonNull(bulkRequest); - } - - public void add(@NonNull UpdateRequest req) { - synchronized (bulkRequest) { - bulkRequest.add(Objects.requireNonNull(req)); - } - } - - public void add(@NonNull IndexRequest req) { - synchronized (bulkRequest) { - bulkRequest.add(Objects.requireNonNull(req)); - } - } - - BulkRequest get() { - return bulkRequest; - } - - int numberOfActions() { - synchronized (bulkRequest) { - return bulkRequest.numberOfActions(); - } - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchBaseDAO.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchBaseDAO.java deleted file mode 100644 index 02a225bd01..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchBaseDAO.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
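numberOfActions() is what lets a caller flush only once the buffered batch reaches a threshold; the DAO deleted below applies its indexBatchSize property this way. A rough sketch of that gating, with illustrative names and again assuming same-package access:

import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.common.xcontent.XContentType;

class BatchFlushSketch {

    private static final int INDEX_BATCH_SIZE = 50; // mirrors conductor.elasticsearch.indexBatchSize
    private BulkRequestWrapper buffer = new BulkRequestWrapper(new BulkRequest());

    void append(String index, String id, String json) {
        buffer.add(new IndexRequest(index).id(id).source(json, XContentType.JSON));
        if (buffer.numberOfActions() >= INDEX_BATCH_SIZE) {
            BulkRequest batch = buffer.get();
            buffer = new BulkRequestWrapper(new BulkRequest());
            send(batch);
        }
    }

    private void send(BulkRequest batch) {
        // placeholder: the real DAO executes the bulk request against Elasticsearch here
    }
}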

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.index; - -import java.io.IOException; -import java.util.ArrayList; - -import org.apache.commons.io.IOUtils; -import org.apache.commons.lang3.StringUtils; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.index.query.QueryStringQueryBuilder; - -import com.netflix.conductor.dao.IndexDAO; -import com.netflix.conductor.es7.dao.query.parser.Expression; -import com.netflix.conductor.es7.dao.query.parser.internal.ParserException; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ObjectNode; - -abstract class ElasticSearchBaseDAO implements IndexDAO { - - String indexPrefix; - ObjectMapper objectMapper; - - String loadTypeMappingSource(String path) throws IOException { - return applyIndexPrefixToTemplate( - IOUtils.toString(ElasticSearchBaseDAO.class.getResourceAsStream(path))); - } - - private String applyIndexPrefixToTemplate(String text) throws JsonProcessingException { - String indexPatternsFieldName = "index_patterns"; - JsonNode root = objectMapper.readTree(text); - if (root != null) { - JsonNode indexPatternsNodeValue = root.get(indexPatternsFieldName); - if (indexPatternsNodeValue != null && indexPatternsNodeValue.isArray()) { - ArrayList patternsWithPrefix = new ArrayList<>(); - indexPatternsNodeValue.forEach( - v -> { - String patternText = v.asText(); - StringBuilder sb = new StringBuilder(); - if (patternText.startsWith("*")) { - sb.append("*") - .append(indexPrefix) - .append("_") - .append(patternText.substring(1)); - } else { - sb.append(indexPrefix).append("_").append(patternText); - } - patternsWithPrefix.add(sb.toString()); - }); - ((ObjectNode) root) - .set(indexPatternsFieldName, objectMapper.valueToTree(patternsWithPrefix)); - System.out.println( - objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root)); - return objectMapper.writerWithDefaultPrettyPrinter().writeValueAsString(root); - } - } - return text; - } - - BoolQueryBuilder boolQueryBuilder(String expression, String queryString) - throws ParserException { - QueryBuilder queryBuilder = QueryBuilders.matchAllQuery(); - if (StringUtils.isNotEmpty(expression)) { - Expression exp = Expression.fromString(expression); - queryBuilder = exp.getFilterBuilder(); - } - BoolQueryBuilder filterQuery = QueryBuilders.boolQuery().must(queryBuilder); - QueryStringQueryBuilder stringQuery = QueryBuilders.queryStringQuery(queryString); - return QueryBuilders.boolQuery().must(stringQuery).must(filterQuery); - } - - protected String getIndexName(String documentType) { - return indexPrefix + "_" + documentType; - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDAOV7.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDAOV7.java deleted file mode 100644 index d56b26b622..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDAOV7.java +++ /dev/null 
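boolQueryBuilder(...) in ElasticSearchBaseDAO above AND's a structured filter with a free-text query_string query. A minimal sketch of the composed query, substituting a simple term query for the module's Expression parser output (e.g. "status='FAILED'"); the field and search string are illustrative:

import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;

class QueryCompositionSketch {

    public static void main(String[] args) {
        // stands in for the parsed Expression filter
        BoolQueryBuilder filterQuery =
                QueryBuilders.boolQuery().must(QueryBuilders.termQuery("status", "FAILED"));
        BoolQueryBuilder query =
                QueryBuilders.boolQuery()
                        .must(QueryBuilders.queryStringQuery("orderWorkflow*")) // free-text part
                        .must(filterQuery);
        System.out.println(query); // the JSON query body that would be sent to Elasticsearch
    }
}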
@@ -1,1135 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.index; - -import java.io.IOException; -import java.io.InputStream; -import java.text.SimpleDateFormat; -import java.time.Instant; -import java.time.LocalDate; -import java.util.*; -import java.util.concurrent.*; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -import javax.annotation.PostConstruct; -import javax.annotation.PreDestroy; - -import org.apache.commons.io.IOUtils; -import org.apache.http.HttpEntity; -import org.apache.http.HttpStatus; -import org.apache.http.entity.ContentType; -import org.apache.http.nio.entity.NByteArrayEntity; -import org.apache.http.nio.entity.NStringEntity; -import org.apache.http.util.EntityUtils; -import org.elasticsearch.action.DocWriteResponse; -import org.elasticsearch.action.bulk.BulkRequest; -import org.elasticsearch.action.delete.DeleteRequest; -import org.elasticsearch.action.delete.DeleteResponse; -import org.elasticsearch.action.get.GetRequest; -import org.elasticsearch.action.get.GetResponse; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.search.SearchRequest; -import org.elasticsearch.action.search.SearchResponse; -import org.elasticsearch.action.update.UpdateRequest; -import org.elasticsearch.client.*; -import org.elasticsearch.client.core.CountRequest; -import org.elasticsearch.client.core.CountResponse; -import org.elasticsearch.common.xcontent.XContentType; -import org.elasticsearch.index.query.BoolQueryBuilder; -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; -import org.elasticsearch.search.SearchHit; -import org.elasticsearch.search.builder.SearchSourceBuilder; -import org.elasticsearch.search.sort.FieldSortBuilder; -import org.elasticsearch.search.sort.SortOrder; -import org.joda.time.DateTime; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.annotations.Trace; -import com.netflix.conductor.common.metadata.events.EventExecution; -import com.netflix.conductor.common.metadata.tasks.TaskExecLog; -import com.netflix.conductor.common.run.SearchResult; -import com.netflix.conductor.common.run.TaskSummary; -import com.netflix.conductor.common.run.WorkflowSummary; -import com.netflix.conductor.core.events.queue.Message; -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.dao.IndexDAO; -import com.netflix.conductor.es7.config.ElasticSearchProperties; -import com.netflix.conductor.es7.dao.query.parser.internal.ParserException; -import com.netflix.conductor.metrics.Monitors; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.fasterxml.jackson.databind.node.ObjectNode; -import com.fasterxml.jackson.databind.type.MapType; -import com.fasterxml.jackson.databind.type.TypeFactory; - -@Trace -public class ElasticSearchRestDAOV7 extends ElasticSearchBaseDAO implements IndexDAO { - - private static final Logger logger = LoggerFactory.getLogger(ElasticSearchRestDAOV7.class); - - private static final int CORE_POOL_SIZE 
= 6; - private static final long KEEP_ALIVE_TIME = 1L; - - private static final String WORKFLOW_DOC_TYPE = "workflow"; - private static final String TASK_DOC_TYPE = "task"; - private static final String LOG_DOC_TYPE = "task_log"; - private static final String EVENT_DOC_TYPE = "event"; - private static final String MSG_DOC_TYPE = "message"; - - private static final TimeZone GMT = TimeZone.getTimeZone("GMT"); - private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW"); - - private @interface HttpMethod { - - String GET = "GET"; - String POST = "POST"; - String PUT = "PUT"; - String HEAD = "HEAD"; - } - - private static final String className = ElasticSearchRestDAOV7.class.getSimpleName(); - - private final String workflowIndexName; - private final String taskIndexName; - private final String eventIndexPrefix; - private String eventIndexName; - private final String messageIndexPrefix; - private String messageIndexName; - private String logIndexName; - private final String logIndexPrefix; - - private final String clusterHealthColor; - private final RestHighLevelClient elasticSearchClient; - private final RestClient elasticSearchAdminClient; - private final ExecutorService executorService; - private final ExecutorService logExecutorService; - private final ConcurrentHashMap bulkRequests; - private final int indexBatchSize; - private final int asyncBufferFlushTimeout; - private final ElasticSearchProperties properties; - private final RetryTemplate retryTemplate; - - static { - SIMPLE_DATE_FORMAT.setTimeZone(GMT); - } - - public ElasticSearchRestDAOV7( - RestClientBuilder restClientBuilder, - RetryTemplate retryTemplate, - ElasticSearchProperties properties, - ObjectMapper objectMapper) { - - this.objectMapper = objectMapper; - this.elasticSearchAdminClient = restClientBuilder.build(); - this.elasticSearchClient = new RestHighLevelClient(restClientBuilder); - this.clusterHealthColor = properties.getClusterHealthColor(); - this.bulkRequests = new ConcurrentHashMap<>(); - this.indexBatchSize = properties.getIndexBatchSize(); - this.asyncBufferFlushTimeout = (int) properties.getAsyncBufferFlushTimeout().getSeconds(); - this.properties = properties; - - this.indexPrefix = properties.getIndexPrefix(); - - this.workflowIndexName = getIndexName(WORKFLOW_DOC_TYPE); - this.taskIndexName = getIndexName(TASK_DOC_TYPE); - this.logIndexPrefix = this.indexPrefix + "_" + LOG_DOC_TYPE; - this.messageIndexPrefix = this.indexPrefix + "_" + MSG_DOC_TYPE; - this.eventIndexPrefix = this.indexPrefix + "_" + EVENT_DOC_TYPE; - int workerQueueSize = properties.getAsyncWorkerQueueSize(); - int maximumPoolSize = properties.getAsyncMaxPoolSize(); - - // Set up a workerpool for performing async operations. 
- this.executorService = - new ThreadPoolExecutor( - CORE_POOL_SIZE, - maximumPoolSize, - KEEP_ALIVE_TIME, - TimeUnit.MINUTES, - new LinkedBlockingQueue<>(workerQueueSize), - (runnable, executor) -> { - logger.warn( - "Request {} to async dao discarded in executor {}", - runnable, - executor); - Monitors.recordDiscardedIndexingCount("indexQueue"); - }); - - // Set up a workerpool for performing async operations for task_logs, event_executions, - // message - int corePoolSize = 1; - maximumPoolSize = 2; - long keepAliveTime = 30L; - this.logExecutorService = - new ThreadPoolExecutor( - corePoolSize, - maximumPoolSize, - keepAliveTime, - TimeUnit.SECONDS, - new LinkedBlockingQueue<>(workerQueueSize), - (runnable, executor) -> { - logger.warn( - "Request {} to async log dao discarded in executor {}", - runnable, - executor); - Monitors.recordDiscardedIndexingCount("logQueue"); - }); - - Executors.newSingleThreadScheduledExecutor() - .scheduleAtFixedRate(this::flushBulkRequests, 60, 30, TimeUnit.SECONDS); - this.retryTemplate = retryTemplate; - } - - @PreDestroy - private void shutdown() { - logger.info("Gracefully shutdown executor service"); - shutdownExecutorService(logExecutorService); - shutdownExecutorService(executorService); - } - - private void shutdownExecutorService(ExecutorService execService) { - try { - execService.shutdown(); - if (execService.awaitTermination(30, TimeUnit.SECONDS)) { - logger.debug("tasks completed, shutting down"); - } else { - logger.warn("Forcing shutdown after waiting for 30 seconds"); - execService.shutdownNow(); - } - } catch (InterruptedException ie) { - logger.warn( - "Shutdown interrupted, invoking shutdownNow on scheduledThreadPoolExecutor for delay queue"); - execService.shutdownNow(); - Thread.currentThread().interrupt(); - } - } - - @Override - @PostConstruct - public void setup() throws Exception { - waitForHealthyCluster(); - - if (properties.isAutoIndexManagementEnabled()) { - createIndexesTemplates(); - createWorkflowIndex(); - createTaskIndex(); - } - } - - private void createIndexesTemplates() { - try { - initIndexesTemplates(); - updateIndexesNames(); - Executors.newScheduledThreadPool(1) - .scheduleAtFixedRate(this::updateIndexesNames, 0, 1, TimeUnit.HOURS); - } catch (Exception e) { - logger.error("Error creating index templates!", e); - } - } - - private void initIndexesTemplates() { - initIndexTemplate(LOG_DOC_TYPE); - initIndexTemplate(EVENT_DOC_TYPE); - initIndexTemplate(MSG_DOC_TYPE); - } - - /** Initializes the index with the required templates and mappings. 
*/ - private void initIndexTemplate(String type) { - String template = "template_" + type; - try { - if (doesResourceNotExist("/_template/" + template)) { - logger.info("Creating the index template '" + template + "'"); - InputStream stream = - ElasticSearchRestDAOV7.class.getResourceAsStream("/" + template + ".json"); - byte[] templateSource = IOUtils.toByteArray(stream); - - HttpEntity entity = - new NByteArrayEntity(templateSource, ContentType.APPLICATION_JSON); - Request request = new Request(HttpMethod.PUT, "/_template/" + template); - request.setEntity(entity); - String test = - IOUtils.toString( - elasticSearchAdminClient - .performRequest(request) - .getEntity() - .getContent()); - } - } catch (Exception e) { - logger.error("Failed to init " + template, e); - } - } - - private void updateIndexesNames() { - logIndexName = updateIndexName(LOG_DOC_TYPE); - eventIndexName = updateIndexName(EVENT_DOC_TYPE); - messageIndexName = updateIndexName(MSG_DOC_TYPE); - } - - private String updateIndexName(String type) { - String indexName = - this.indexPrefix + "_" + type + "_" + SIMPLE_DATE_FORMAT.format(new Date()); - try { - addIndex(indexName); - return indexName; - } catch (IOException e) { - logger.error("Failed to update log index name: {}", indexName, e); - throw new ApplicationException(e.getMessage(), e); - } - } - - private void createWorkflowIndex() { - String indexName = getIndexName(WORKFLOW_DOC_TYPE); - try { - addIndex(indexName, "/mappings_docType_workflow.json"); - } catch (IOException e) { - logger.error("Failed to initialize index '{}'", indexName, e); - } - } - - private void createTaskIndex() { - String indexName = getIndexName(TASK_DOC_TYPE); - try { - addIndex(indexName, "/mappings_docType_task.json"); - } catch (IOException e) { - logger.error("Failed to initialize index '{}'", indexName, e); - } - } - - /** - * Waits for the ES cluster to become green. - * - * @throws Exception If there is an issue connecting with the ES cluster. - */ - private void waitForHealthyCluster() throws Exception { - Map params = new HashMap<>(); - params.put("wait_for_status", this.clusterHealthColor); - params.put("timeout", "30s"); - Request request = new Request("GET", "/_cluster/health"); - request.addParameters(params); - elasticSearchAdminClient.performRequest(request); - } - - /** - * Adds an index to elasticsearch if it does not exist. - * - * @param index The name of the index to create. - * @param mappingFilename Index mapping filename - * @throws IOException If an error occurred during requests to ES. 
- */ - private void addIndex(String index, final String mappingFilename) throws IOException { - logger.info("Adding index '{}'...", index); - String resourcePath = "/" + index; - if (doesResourceNotExist(resourcePath)) { - try { - ObjectNode setting = objectMapper.createObjectNode(); - ObjectNode indexSetting = objectMapper.createObjectNode(); - ObjectNode root = objectMapper.createObjectNode(); - indexSetting.put("number_of_shards", properties.getIndexShardCount()); - indexSetting.put("number_of_replicas", properties.getIndexReplicasCount()); - JsonNode mappingNodeValue = - objectMapper.readTree(loadTypeMappingSource(mappingFilename)); - root.set("settings", indexSetting); - root.set("mappings", mappingNodeValue); - Request request = new Request(HttpMethod.PUT, resourcePath); - request.setEntity( - new NStringEntity( - objectMapper.writeValueAsString(root), - ContentType.APPLICATION_JSON)); - elasticSearchAdminClient.performRequest(request); - logger.info("Added '{}' index", index); - } catch (ResponseException e) { - - boolean errorCreatingIndex = true; - - Response errorResponse = e.getResponse(); - if (errorResponse.getStatusLine().getStatusCode() == HttpStatus.SC_BAD_REQUEST) { - JsonNode root = - objectMapper.readTree(EntityUtils.toString(errorResponse.getEntity())); - String errorCode = root.get("error").get("type").asText(); - if ("index_already_exists_exception".equals(errorCode)) { - errorCreatingIndex = false; - } - } - - if (errorCreatingIndex) { - throw e; - } - } - } else { - logger.info("Index '{}' already exists", index); - } - } - /** - * Adds an index to elasticsearch if it does not exist. - * - * @param index The name of the index to create. - * @throws IOException If an error occurred during requests to ES. - */ - private void addIndex(final String index) throws IOException { - - logger.info("Adding index '{}'...", index); - - String resourcePath = "/" + index; - - if (doesResourceNotExist(resourcePath)) { - - try { - ObjectNode setting = objectMapper.createObjectNode(); - ObjectNode indexSetting = objectMapper.createObjectNode(); - - indexSetting.put("number_of_shards", properties.getIndexShardCount()); - indexSetting.put("number_of_replicas", properties.getIndexReplicasCount()); - - setting.set("settings", indexSetting); - - Request request = new Request(HttpMethod.PUT, resourcePath); - request.setEntity( - new NStringEntity(setting.toString(), ContentType.APPLICATION_JSON)); - elasticSearchAdminClient.performRequest(request); - logger.info("Added '{}' index", index); - } catch (ResponseException e) { - - boolean errorCreatingIndex = true; - - Response errorResponse = e.getResponse(); - if (errorResponse.getStatusLine().getStatusCode() == HttpStatus.SC_BAD_REQUEST) { - JsonNode root = - objectMapper.readTree(EntityUtils.toString(errorResponse.getEntity())); - String errorCode = root.get("error").get("type").asText(); - if ("index_already_exists_exception".equals(errorCode)) { - errorCreatingIndex = false; - } - } - - if (errorCreatingIndex) { - throw e; - } - } - } else { - logger.info("Index '{}' already exists", index); - } - } - - /** - * Adds a mapping type to an index if it does not exist. - * - * @param index The name of the index. - * @param mappingType The name of the mapping type. - * @param mappingFilename The name of the mapping file to use to add the mapping if it does not - * exist. - * @throws IOException If an error occurred during requests to ES. 
- */ - private void addMappingToIndex( - final String index, final String mappingType, final String mappingFilename) - throws IOException { - - logger.info("Adding '{}' mapping to index '{}'...", mappingType, index); - - String resourcePath = "/" + index + "/_mapping"; - - if (doesResourceNotExist(resourcePath)) { - HttpEntity entity = - new NByteArrayEntity( - loadTypeMappingSource(mappingFilename).getBytes(), - ContentType.APPLICATION_JSON); - Request request = new Request(HttpMethod.PUT, resourcePath); - request.setEntity(entity); - elasticSearchAdminClient.performRequest(request); - logger.info("Added '{}' mapping", mappingType); - } else { - logger.info("Mapping '{}' already exists", mappingType); - } - } - - /** - * Determines whether a resource exists in ES. This will call a GET method to a particular path - * and return true if status 200; false otherwise. - * - * @param resourcePath The path of the resource to get. - * @return True if it exists; false otherwise. - * @throws IOException If an error occurred during requests to ES. - */ - public boolean doesResourceExist(final String resourcePath) throws IOException { - Request request = new Request(HttpMethod.HEAD, resourcePath); - Response response = elasticSearchAdminClient.performRequest(request); - return response.getStatusLine().getStatusCode() == HttpStatus.SC_OK; - } - - /** - * The inverse of doesResourceExist. - * - * @param resourcePath The path of the resource to check. - * @return True if it does not exist; false otherwise. - * @throws IOException If an error occurred during requests to ES. - */ - public boolean doesResourceNotExist(final String resourcePath) throws IOException { - return !doesResourceExist(resourcePath); - } - - @Override - public void indexWorkflow(WorkflowSummary workflow) { - try { - long startTime = Instant.now().toEpochMilli(); - String workflowId = workflow.getWorkflowId(); - byte[] docBytes = objectMapper.writeValueAsBytes(workflow); - - IndexRequest request = - new IndexRequest(workflowIndexName) - .id(workflowId) - .source(docBytes, XContentType.JSON); - elasticSearchClient.index(request, RequestOptions.DEFAULT); - long endTime = Instant.now().toEpochMilli(); - logger.debug( - "Time taken {} for indexing workflow: {}", endTime - startTime, workflowId); - Monitors.recordESIndexTime("index_workflow", WORKFLOW_DOC_TYPE, endTime - startTime); - Monitors.recordWorkerQueueSize( - "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); - } catch (Exception e) { - Monitors.error(className, "indexWorkflow"); - logger.error("Failed to index workflow: {}", workflow.getWorkflowId(), e); - } - } - - @Override - public CompletableFuture asyncIndexWorkflow(WorkflowSummary workflow) { - return CompletableFuture.runAsync(() -> indexWorkflow(workflow), executorService); - } - - @Override - public void indexTask(TaskSummary task) { - try { - long startTime = Instant.now().toEpochMilli(); - String taskId = task.getTaskId(); - - indexObject(taskIndexName, TASK_DOC_TYPE, taskId, task); - long endTime = Instant.now().toEpochMilli(); - logger.debug( - "Time taken {} for indexing task:{} in workflow: {}", - endTime - startTime, - taskId, - task.getWorkflowId()); - Monitors.recordESIndexTime("index_task", TASK_DOC_TYPE, endTime - startTime); - Monitors.recordWorkerQueueSize( - "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); - } catch (Exception e) { - logger.error("Failed to index task: {}", task.getTaskId(), e); - } - } - - @Override - public CompletableFuture 
asyncIndexTask(TaskSummary task) { - return CompletableFuture.runAsync(() -> indexTask(task), executorService); - } - - @Override - public void addTaskExecutionLogs(List taskExecLogs) { - if (taskExecLogs.isEmpty()) { - return; - } - - long startTime = Instant.now().toEpochMilli(); - BulkRequest bulkRequest = new BulkRequest(); - for (TaskExecLog log : taskExecLogs) { - - byte[] docBytes; - try { - docBytes = objectMapper.writeValueAsBytes(log); - } catch (JsonProcessingException e) { - logger.error("Failed to convert task log to JSON for task {}", log.getTaskId()); - continue; - } - - IndexRequest request = new IndexRequest(logIndexName); - request.source(docBytes, XContentType.JSON); - bulkRequest.add(request); - } - - try { - elasticSearchClient.bulk(bulkRequest, RequestOptions.DEFAULT); - long endTime = Instant.now().toEpochMilli(); - logger.debug("Time taken {} for indexing taskExecutionLogs", endTime - startTime); - Monitors.recordESIndexTime( - "index_task_execution_logs", LOG_DOC_TYPE, endTime - startTime); - Monitors.recordWorkerQueueSize( - "logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size()); - } catch (Exception e) { - List taskIds = - taskExecLogs.stream().map(TaskExecLog::getTaskId).collect(Collectors.toList()); - logger.error("Failed to index task execution logs for tasks: {}", taskIds, e); - } - } - - @Override - public CompletableFuture asyncAddTaskExecutionLogs(List logs) { - return CompletableFuture.runAsync(() -> addTaskExecutionLogs(logs), logExecutorService); - } - - @Override - public List getTaskExecutionLogs(String taskId) { - try { - BoolQueryBuilder query = boolQueryBuilder("taskId='" + taskId + "'", "*"); - - // Create the searchObjectIdsViaExpression source - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.query(query); - searchSourceBuilder.sort(new FieldSortBuilder("createdTime").order(SortOrder.ASC)); - searchSourceBuilder.size(properties.getTaskLogResultLimit()); - - // Generate the actual request to send to ES. - SearchRequest searchRequest = new SearchRequest(logIndexPrefix + "*"); - searchRequest.source(searchSourceBuilder); - - SearchResponse response = - elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT); - - return mapTaskExecLogsResponse(response); - } catch (Exception e) { - logger.error("Failed to get task execution logs for task: {}", taskId, e); - } - return null; - } - - private List mapTaskExecLogsResponse(SearchResponse response) throws IOException { - SearchHit[] hits = response.getHits().getHits(); - List logs = new ArrayList<>(hits.length); - for (SearchHit hit : hits) { - String source = hit.getSourceAsString(); - TaskExecLog tel = objectMapper.readValue(source, TaskExecLog.class); - logs.add(tel); - } - return logs; - } - - @Override - public List getMessages(String queue) { - try { - BoolQueryBuilder query = boolQueryBuilder("queue='" + queue + "'", "*"); - - // Create the searchObjectIdsViaExpression source - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.query(query); - searchSourceBuilder.sort(new FieldSortBuilder("created").order(SortOrder.ASC)); - - // Generate the actual request to send to ES. 
- SearchRequest searchRequest = new SearchRequest(messageIndexPrefix + "*"); - searchRequest.source(searchSourceBuilder); - - SearchResponse response = - elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT); - return mapGetMessagesResponse(response); - } catch (Exception e) { - logger.error("Failed to get messages for queue: {}", queue, e); - } - return null; - } - - private List mapGetMessagesResponse(SearchResponse response) throws IOException { - SearchHit[] hits = response.getHits().getHits(); - TypeFactory factory = TypeFactory.defaultInstance(); - MapType type = factory.constructMapType(HashMap.class, String.class, String.class); - List messages = new ArrayList<>(hits.length); - for (SearchHit hit : hits) { - String source = hit.getSourceAsString(); - Map mapSource = objectMapper.readValue(source, type); - Message msg = new Message(mapSource.get("messageId"), mapSource.get("payload"), null); - messages.add(msg); - } - return messages; - } - - @Override - public List getEventExecutions(String event) { - try { - BoolQueryBuilder query = boolQueryBuilder("event='" + event + "'", "*"); - - // Create the searchObjectIdsViaExpression source - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.query(query); - searchSourceBuilder.sort(new FieldSortBuilder("created").order(SortOrder.ASC)); - - // Generate the actual request to send to ES. - SearchRequest searchRequest = new SearchRequest(eventIndexPrefix + "*"); - searchRequest.source(searchSourceBuilder); - - SearchResponse response = - elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT); - - return mapEventExecutionsResponse(response); - } catch (Exception e) { - logger.error("Failed to get executions for event: {}", event, e); - } - return null; - } - - private List mapEventExecutionsResponse(SearchResponse response) - throws IOException { - SearchHit[] hits = response.getHits().getHits(); - List executions = new ArrayList<>(hits.length); - for (SearchHit hit : hits) { - String source = hit.getSourceAsString(); - EventExecution tel = objectMapper.readValue(source, EventExecution.class); - executions.add(tel); - } - return executions; - } - - @Override - public void addMessage(String queue, Message message) { - try { - long startTime = Instant.now().toEpochMilli(); - Map doc = new HashMap<>(); - doc.put("messageId", message.getId()); - doc.put("payload", message.getPayload()); - doc.put("queue", queue); - doc.put("created", System.currentTimeMillis()); - - indexObject(messageIndexName, MSG_DOC_TYPE, doc); - long endTime = Instant.now().toEpochMilli(); - logger.debug( - "Time taken {} for indexing message: {}", - endTime - startTime, - message.getId()); - Monitors.recordESIndexTime("add_message", MSG_DOC_TYPE, endTime - startTime); - } catch (Exception e) { - logger.error("Failed to index message: {}", message.getId(), e); - } - } - - @Override - public CompletableFuture asyncAddMessage(String queue, Message message) { - return CompletableFuture.runAsync(() -> addMessage(queue, message), executorService); - } - - @Override - public void addEventExecution(EventExecution eventExecution) { - try { - long startTime = Instant.now().toEpochMilli(); - String id = - eventExecution.getName() - + "." - + eventExecution.getEvent() - + "." - + eventExecution.getMessageId() - + "." 
- + eventExecution.getId(); - - indexObject(eventIndexName, EVENT_DOC_TYPE, id, eventExecution); - long endTime = Instant.now().toEpochMilli(); - logger.debug( - "Time taken {} for indexing event execution: {}", - endTime - startTime, - eventExecution.getId()); - Monitors.recordESIndexTime("add_event_execution", EVENT_DOC_TYPE, endTime - startTime); - Monitors.recordWorkerQueueSize( - "logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size()); - } catch (Exception e) { - logger.error("Failed to index event execution: {}", eventExecution.getId(), e); - } - } - - @Override - public CompletableFuture asyncAddEventExecution(EventExecution eventExecution) { - return CompletableFuture.runAsync( - () -> addEventExecution(eventExecution), logExecutorService); - } - - @Override - public SearchResult searchWorkflows( - String query, String freeText, int start, int count, List sort) { - try { - return searchObjectIdsViaExpression( - query, start, count, sort, freeText, WORKFLOW_DOC_TYPE); - } catch (Exception e) { - throw new ApplicationException( - ApplicationException.Code.BACKEND_ERROR, e.getMessage(), e); - } - } - - @Override - public SearchResult searchTasks( - String query, String freeText, int start, int count, List sort) { - try { - return searchObjectIdsViaExpression(query, start, count, sort, freeText, TASK_DOC_TYPE); - } catch (Exception e) { - throw new ApplicationException( - ApplicationException.Code.BACKEND_ERROR, e.getMessage(), e); - } - } - - @Override - public void removeWorkflow(String workflowId) { - long startTime = Instant.now().toEpochMilli(); - DeleteRequest request = new DeleteRequest(workflowIndexName, workflowId); - - try { - DeleteResponse response = elasticSearchClient.delete(request, RequestOptions.DEFAULT); - - if (response.getResult() == DocWriteResponse.Result.NOT_FOUND) { - logger.error("Index removal failed - document not found by id: {}", workflowId); - } - long endTime = Instant.now().toEpochMilli(); - logger.debug( - "Time taken {} for removing workflow: {}", endTime - startTime, workflowId); - Monitors.recordESIndexTime("remove_workflow", WORKFLOW_DOC_TYPE, endTime - startTime); - Monitors.recordWorkerQueueSize( - "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); - } catch (IOException e) { - logger.error("Failed to remove workflow {} from index", workflowId, e); - Monitors.error(className, "remove"); - } - } - - @Override - public CompletableFuture asyncRemoveWorkflow(String workflowId) { - return CompletableFuture.runAsync(() -> removeWorkflow(workflowId), executorService); - } - - @Override - public void updateWorkflow(String workflowInstanceId, String[] keys, Object[] values) { - try { - if (keys.length != values.length) { - throw new ApplicationException( - ApplicationException.Code.INVALID_INPUT, - "Number of keys and values do not match"); - } - - long startTime = Instant.now().toEpochMilli(); - UpdateRequest request = new UpdateRequest(workflowIndexName, workflowInstanceId); - Map source = - IntStream.range(0, keys.length) - .boxed() - .collect(Collectors.toMap(i -> keys[i], i -> values[i])); - request.doc(source); - - logger.debug("Updating workflow {} with {}", workflowInstanceId, source); - elasticSearchClient.update(request, RequestOptions.DEFAULT); - long endTime = Instant.now().toEpochMilli(); - logger.debug( - "Time taken {} for updating workflow: {}", - endTime - startTime, - workflowInstanceId); - Monitors.recordESIndexTime("update_workflow", WORKFLOW_DOC_TYPE, endTime - startTime); - 
Monitors.recordWorkerQueueSize( - "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); - } catch (Exception e) { - logger.error("Failed to update workflow {}", workflowInstanceId, e); - Monitors.error(className, "update"); - } - } - - @Override - public CompletableFuture asyncUpdateWorkflow( - String workflowInstanceId, String[] keys, Object[] values) { - return CompletableFuture.runAsync( - () -> updateWorkflow(workflowInstanceId, keys, values), executorService); - } - - @Override - public String get(String workflowInstanceId, String fieldToGet) { - GetRequest request = new GetRequest(workflowIndexName, workflowInstanceId); - GetResponse response; - try { - response = elasticSearchClient.get(request, RequestOptions.DEFAULT); - } catch (IOException e) { - logger.error( - "Unable to get Workflow: {} from ElasticSearch index: {}", - workflowInstanceId, - workflowIndexName, - e); - return null; - } - - if (response.isExists()) { - Map sourceAsMap = response.getSourceAsMap(); - if (sourceAsMap.get(fieldToGet) != null) { - return sourceAsMap.get(fieldToGet).toString(); - } - } - - logger.debug( - "Unable to find Workflow: {} in ElasticSearch index: {}.", - workflowInstanceId, - workflowIndexName); - return null; - } - - private SearchResult searchObjectIdsViaExpression( - String structuredQuery, - int start, - int size, - List sortOptions, - String freeTextQuery, - String docType) - throws ParserException, IOException { - QueryBuilder queryBuilder = boolQueryBuilder(structuredQuery, freeTextQuery); - return searchObjectIds( - getIndexName(docType), queryBuilder, start, size, sortOptions, docType); - } - - private SearchResult searchObjectIds( - String indexName, QueryBuilder queryBuilder, int start, int size, String docType) - throws IOException { - return searchObjectIds(indexName, queryBuilder, start, size, null, docType); - } - - /** - * Tries to find object ids for a given query in an index. - * - * @param indexName The name of the index. - * @param queryBuilder The query to use for searching. - * @param start The start to use. - * @param size The total return size. - * @param sortOptions A list of string options to sort in the form VALUE:ORDER; where ORDER is - * optional and can be either ASC OR DESC. - * @param docType The document type to searchObjectIdsViaExpression for. - * @return The SearchResults which includes the count and IDs that were found. - * @throws IOException If we cannot communicate with ES. - */ - private SearchResult searchObjectIds( - String indexName, - QueryBuilder queryBuilder, - int start, - int size, - List sortOptions, - String docType) - throws IOException { - SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder(); - searchSourceBuilder.query(queryBuilder); - searchSourceBuilder.from(start); - searchSourceBuilder.size(size); - - if (sortOptions != null && !sortOptions.isEmpty()) { - - for (String sortOption : sortOptions) { - SortOrder order = SortOrder.ASC; - String field = sortOption; - int index = sortOption.indexOf(":"); - if (index > 0) { - field = sortOption.substring(0, index); - order = SortOrder.valueOf(sortOption.substring(index + 1)); - } - searchSourceBuilder.sort(new FieldSortBuilder(field).order(order)); - } - } - - // Generate the actual request to send to ES. 
- SearchRequest searchRequest = new SearchRequest(indexName); - searchRequest.source(searchSourceBuilder); - - SearchResponse response = elasticSearchClient.search(searchRequest, RequestOptions.DEFAULT); - - List result = new LinkedList<>(); - response.getHits().forEach(hit -> result.add(hit.getId())); - long count = response.getHits().getTotalHits().value; - return new SearchResult<>(count, result); - } - - @Override - public List searchArchivableWorkflows(String indexName, long archiveTtlDays) { - QueryBuilder q = - QueryBuilders.boolQuery() - .must( - QueryBuilders.rangeQuery("endTime") - .lt(LocalDate.now().minusDays(archiveTtlDays).toString()) - .gte( - LocalDate.now() - .minusDays(archiveTtlDays) - .minusDays(1) - .toString())) - .should(QueryBuilders.termQuery("status", "COMPLETED")) - .should(QueryBuilders.termQuery("status", "FAILED")) - .should(QueryBuilders.termQuery("status", "TIMED_OUT")) - .should(QueryBuilders.termQuery("status", "TERMINATED")) - .mustNot(QueryBuilders.existsQuery("archived")) - .minimumShouldMatch(1); - - SearchResult workflowIds; - try { - workflowIds = searchObjectIds(indexName, q, 0, 1000, WORKFLOW_DOC_TYPE); - } catch (IOException e) { - logger.error("Unable to communicate with ES to find archivable workflows", e); - return Collections.emptyList(); - } - - return workflowIds.getResults(); - } - - @Override - public long getWorkflowCount(String query, String freeText) { - try { - return getObjectCounts(query, freeText, WORKFLOW_DOC_TYPE); - } catch (Exception e) { - throw new ApplicationException( - ApplicationException.Code.BACKEND_ERROR, e.getMessage(), e); - } - } - - private long getObjectCounts(String structuredQuery, String freeTextQuery, String docType) - throws ParserException, IOException { - QueryBuilder queryBuilder = boolQueryBuilder(structuredQuery, freeTextQuery); - - String indexName = getIndexName(docType); - CountRequest countRequest = new CountRequest(new String[] {indexName}, queryBuilder); - CountResponse countResponse = - elasticSearchClient.count(countRequest, RequestOptions.DEFAULT); - return countResponse.getCount(); - } - - public List searchRecentRunningWorkflows( - int lastModifiedHoursAgoFrom, int lastModifiedHoursAgoTo) { - DateTime dateTime = new DateTime(); - QueryBuilder q = - QueryBuilders.boolQuery() - .must( - QueryBuilders.rangeQuery("updateTime") - .gt(dateTime.minusHours(lastModifiedHoursAgoFrom))) - .must( - QueryBuilders.rangeQuery("updateTime") - .lt(dateTime.minusHours(lastModifiedHoursAgoTo))) - .must(QueryBuilders.termQuery("status", "RUNNING")); - - SearchResult workflowIds; - try { - workflowIds = - searchObjectIds( - workflowIndexName, - q, - 0, - 5000, - Collections.singletonList("updateTime:ASC"), - WORKFLOW_DOC_TYPE); - } catch (IOException e) { - logger.error("Unable to communicate with ES to find recent running workflows", e); - return Collections.emptyList(); - } - - return workflowIds.getResults(); - } - - private void indexObject(final String index, final String docType, final Object doc) { - indexObject(index, docType, null, doc); - } - - private void indexObject( - final String index, final String docType, final String docId, final Object doc) { - - byte[] docBytes; - try { - docBytes = objectMapper.writeValueAsBytes(doc); - } catch (JsonProcessingException e) { - logger.error("Failed to convert {} '{}' to byte string", docType, docId); - return; - } - IndexRequest request = new IndexRequest(index); - request.id(docId).source(docBytes, XContentType.JSON); - - if (bulkRequests.get(docType) == null) 
{ - bulkRequests.put( - docType, new BulkRequests(System.currentTimeMillis(), new BulkRequest())); - } - - bulkRequests.get(docType).getBulkRequest().add(request); - if (bulkRequests.get(docType).getBulkRequest().numberOfActions() >= this.indexBatchSize) { - indexBulkRequest(docType); - } - } - - private synchronized void indexBulkRequest(String docType) { - if (bulkRequests.get(docType).getBulkRequest() != null - && bulkRequests.get(docType).getBulkRequest().numberOfActions() > 0) { - synchronized (bulkRequests.get(docType).getBulkRequest()) { - indexWithRetry( - bulkRequests.get(docType).getBulkRequest().get(), - "Bulk Indexing " + docType, - docType); - bulkRequests.put( - docType, new BulkRequests(System.currentTimeMillis(), new BulkRequest())); - } - } - } - - /** - * Performs an index operation with a retry. - * - * @param request The index request that we want to perform. - * @param operationDescription The type of operation that we are performing. - */ - private void indexWithRetry( - final BulkRequest request, final String operationDescription, String docType) { - try { - long startTime = Instant.now().toEpochMilli(); - retryTemplate.execute( - context -> elasticSearchClient.bulk(request, RequestOptions.DEFAULT)); - long endTime = Instant.now().toEpochMilli(); - logger.debug( - "Time taken {} for indexing object of type: {}", endTime - startTime, docType); - Monitors.recordESIndexTime("index_object", docType, endTime - startTime); - Monitors.recordWorkerQueueSize( - "indexQueue", ((ThreadPoolExecutor) executorService).getQueue().size()); - Monitors.recordWorkerQueueSize( - "logQueue", ((ThreadPoolExecutor) logExecutorService).getQueue().size()); - } catch (Exception e) { - Monitors.error(className, "index"); - logger.error("Failed to index {} for request type: {}", request, docType, e); - } - } - - /** - * Flush the buffers if bulk requests have not been indexed for the past {@link - * ElasticSearchProperties#getAsyncBufferFlushTimeout()} seconds This is to prevent data loss in - * case the instance is terminated, while the buffer still holds documents to be indexed. - */ - private void flushBulkRequests() { - bulkRequests.entrySet().stream() - .filter( - entry -> - (System.currentTimeMillis() - entry.getValue().getLastFlushTime()) - >= asyncBufferFlushTimeout * 1000L) - .filter( - entry -> - entry.getValue().getBulkRequest() != null - && entry.getValue().getBulkRequest().numberOfActions() > 0) - .forEach( - entry -> { - logger.debug( - "Flushing bulk request buffer for type {}, size: {}", - entry.getKey(), - entry.getValue().getBulkRequest().numberOfActions()); - indexBulkRequest(entry.getKey()); - }); - } - - private static class BulkRequests { - - private final long lastFlushTime; - private final BulkRequestWrapper bulkRequest; - - long getLastFlushTime() { - return lastFlushTime; - } - - BulkRequestWrapper getBulkRequest() { - return bulkRequest; - } - - BulkRequests(long lastFlushTime, BulkRequest bulkRequest) { - this.lastFlushTime = lastFlushTime; - this.bulkRequest = new BulkRequestWrapper(bulkRequest); - } - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/Expression.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/Expression.java deleted file mode 100644 index 6ad82e7d06..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/Expression.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
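For reference, the removed indexObject/flushBulkRequests pair buffers index requests per document type and flushes a buffer once it reaches the configured batch size or has not been flushed within the async flush timeout. A minimal self-contained sketch of that size-or-age pattern (class name, constants, and the elided bulk call are illustrative, not the patch's API):

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class BulkBuffer {
        private static final int BATCH_SIZE = 100;           // stands in for indexBatchSize
        private static final long FLUSH_TIMEOUT_MS = 10_000L; // stands in for asyncBufferFlushTimeout

        private final Map<String, List<Object>> buffers = new ConcurrentHashMap<>();
        private final Map<String, Long> lastFlush = new ConcurrentHashMap<>();

        synchronized void add(String docType, Object request) {
            buffers.computeIfAbsent(docType, t -> new ArrayList<>()).add(request);
            lastFlush.putIfAbsent(docType, System.currentTimeMillis());
            if (buffers.get(docType).size() >= BATCH_SIZE) {
                flush(docType); // size-triggered flush
            }
        }

        // Invoked from a scheduled task so stale buffers are flushed even
        // when no new documents arrive (the age-triggered path).
        synchronized void flushExpired() {
            long now = System.currentTimeMillis();
            new ArrayList<>(buffers.keySet()).stream()
                    .filter(t -> now - lastFlush.getOrDefault(t, now) >= FLUSH_TIMEOUT_MS)
                    .forEach(this::flush);
        }

        private void flush(String docType) {
            List<Object> batch = buffers.remove(docType);
            lastFlush.put(docType, System.currentTimeMillis());
            if (batch != null && !batch.isEmpty()) {
                // the real DAO performs elasticSearchClient.bulk(...) with retries here
            }
        }
    }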

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser; - -import java.io.BufferedInputStream; -import java.io.ByteArrayInputStream; -import java.io.InputStream; - -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; - -import com.netflix.conductor.es7.dao.query.parser.internal.AbstractNode; -import com.netflix.conductor.es7.dao.query.parser.internal.BooleanOp; -import com.netflix.conductor.es7.dao.query.parser.internal.ParserException; - -/** - * @author Viren - */ -public class Expression extends AbstractNode implements FilterProvider { - - private NameValue nameVal; - - private GroupedExpression ge; - - private BooleanOp op; - - private Expression rhs; - - public Expression(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] peeked = peek(1); - - if (peeked[0] == '(') { - this.ge = new GroupedExpression(is); - } else { - this.nameVal = new NameValue(is); - } - - peeked = peek(3); - if (isBoolOpr(peeked)) { - // we have an expression next - this.op = new BooleanOp(is); - this.rhs = new Expression(is); - } - } - - public boolean isBinaryExpr() { - return this.op != null; - } - - public BooleanOp getOperator() { - return this.op; - } - - public Expression getRightHandSide() { - return this.rhs; - } - - public boolean isNameValue() { - return this.nameVal != null; - } - - public NameValue getNameValue() { - return this.nameVal; - } - - public GroupedExpression getGroupedExpression() { - return this.ge; - } - - @Override - public QueryBuilder getFilterBuilder() { - QueryBuilder lhs = null; - if (nameVal != null) { - lhs = nameVal.getFilterBuilder(); - } else { - lhs = ge.getFilterBuilder(); - } - - if (this.isBinaryExpr()) { - QueryBuilder rhsFilter = rhs.getFilterBuilder(); - if (this.op.isAnd()) { - return QueryBuilders.boolQuery().must(lhs).must(rhsFilter); - } else { - return QueryBuilders.boolQuery().should(lhs).should(rhsFilter); - } - } else { - return lhs; - } - } - - @Override - public String toString() { - if (isBinaryExpr()) { - return "" + (nameVal == null ? ge : nameVal) + op + rhs; - } else { - return "" + (nameVal == null ? ge : nameVal); - } - } - - public static Expression fromString(String value) throws ParserException { - return new Expression(new BufferedInputStream(new ByteArrayInputStream(value.getBytes()))); - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/FilterProvider.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/FilterProvider.java deleted file mode 100644 index 80d17f6105..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/FilterProvider.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
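Expression above is the grammar's entry point: fromString parses a structured query string and getFilterBuilder lowers it to an Elasticsearch QueryBuilder. A usage sketch against the removed API:

    import org.elasticsearch.index.query.QueryBuilder;

    import com.netflix.conductor.es7.dao.query.parser.Expression;

    public class ExpressionDemo {
        public static void main(String[] args) throws Exception {
            Expression expr =
                    Expression.fromString("status = 'RUNNING' AND workflowType = 'checkout'");
            QueryBuilder query = expr.getFilterBuilder();
            // a bool query with two `must` clauses, one per name/value pair
            System.out.println(query);
        }
    }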

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser; - -import org.elasticsearch.index.query.QueryBuilder; - -/** - * @author Viren - */ -public interface FilterProvider { - - /** - * @return FilterBuilder for elasticsearch - */ - public QueryBuilder getFilterBuilder(); -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/GroupedExpression.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/GroupedExpression.java deleted file mode 100644 index dc33581308..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/GroupedExpression.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser; - -import java.io.InputStream; - -import org.elasticsearch.index.query.QueryBuilder; - -import com.netflix.conductor.es7.dao.query.parser.internal.AbstractNode; -import com.netflix.conductor.es7.dao.query.parser.internal.ParserException; - -/** - * @author Viren - */ -public class GroupedExpression extends AbstractNode implements FilterProvider { - - private Expression expression; - - public GroupedExpression(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] peeked = read(1); - assertExpected(peeked, "("); - - this.expression = new Expression(is); - - peeked = read(1); - assertExpected(peeked, ")"); - } - - @Override - public String toString() { - return "(" + expression + ")"; - } - - /** - * @return the expression - */ - public Expression getExpression() { - return expression; - } - - @Override - public QueryBuilder getFilterBuilder() { - return expression.getFilterBuilder(); - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/NameValue.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/NameValue.java deleted file mode 100644 index 604cf59d13..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/NameValue.java +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
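Note that the grammar has no operator precedence: a binary expression is simply lhs OP rest, parsed right-associatively, so "a AND b OR c" becomes a AND (b OR c). GroupedExpression is what lets a caller force different grouping, as in this sketch against the removed API:

    import com.netflix.conductor.es7.dao.query.parser.Expression;

    public class GroupingDemo {
        public static void main(String[] args) throws Exception {
            // The parentheses become a nested bool query; without them the
            // right-hand side would absorb `AND version = '2'`.
            System.out.println(
                    Expression.fromString(
                                    "(status = 'COMPLETED' OR status = 'FAILED') AND version = '2'")
                            .getFilterBuilder());
        }
    }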

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser; - -import java.io.InputStream; - -import org.elasticsearch.index.query.QueryBuilder; -import org.elasticsearch.index.query.QueryBuilders; - -import com.netflix.conductor.es7.dao.query.parser.internal.AbstractNode; -import com.netflix.conductor.es7.dao.query.parser.internal.ComparisonOp; -import com.netflix.conductor.es7.dao.query.parser.internal.ComparisonOp.Operators; -import com.netflix.conductor.es7.dao.query.parser.internal.ConstValue; -import com.netflix.conductor.es7.dao.query.parser.internal.ListConst; -import com.netflix.conductor.es7.dao.query.parser.internal.Name; -import com.netflix.conductor.es7.dao.query.parser.internal.ParserException; -import com.netflix.conductor.es7.dao.query.parser.internal.Range; - -/** - * @author Viren - *

- * Represents an expression of the form:
- * key OPR value
- * where OPR is one of the comparison operators:
- * 	>, <, =, !=, IN, BETWEEN, IS, STARTS_WITH
- *
- */ -public class NameValue extends AbstractNode implements FilterProvider { - - private Name name; - - private ComparisonOp op; - - private ConstValue value; - - private Range range; - - private ListConst valueList; - - public NameValue(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - this.name = new Name(is); - this.op = new ComparisonOp(is); - - if (this.op.getOperator().equals(Operators.BETWEEN.value())) { - this.range = new Range(is); - } - if (this.op.getOperator().equals(Operators.IN.value())) { - this.valueList = new ListConst(is); - } else { - this.value = new ConstValue(is); - } - } - - @Override - public String toString() { - return "" + name + op + value; - } - - /** - * @return the name - */ - public Name getName() { - return name; - } - - /** - * @return the op - */ - public ComparisonOp getOp() { - return op; - } - - /** - * @return the value - */ - public ConstValue getValue() { - return value; - } - - @Override - public QueryBuilder getFilterBuilder() { - if (op.getOperator().equals(Operators.EQUALS.value())) { - return QueryBuilders.queryStringQuery( - name.getName() + ":" + value.getValue().toString()); - } else if (op.getOperator().equals(Operators.BETWEEN.value())) { - return QueryBuilders.rangeQuery(name.getName()) - .from(range.getLow()) - .to(range.getHigh()); - } else if (op.getOperator().equals(Operators.IN.value())) { - return QueryBuilders.termsQuery(name.getName(), valueList.getList()); - } else if (op.getOperator().equals(Operators.NOT_EQUALS.value())) { - return QueryBuilders.queryStringQuery( - "NOT " + name.getName() + ":" + value.getValue().toString()); - } else if (op.getOperator().equals(Operators.GREATER_THAN.value())) { - return QueryBuilders.rangeQuery(name.getName()) - .from(value.getValue()) - .includeLower(false) - .includeUpper(false); - } else if (op.getOperator().equals(Operators.IS.value())) { - if (value.getSysConstant().equals(ConstValue.SystemConsts.NULL)) { - return QueryBuilders.boolQuery() - .mustNot( - QueryBuilders.boolQuery() - .must(QueryBuilders.matchAllQuery()) - .mustNot(QueryBuilders.existsQuery(name.getName()))); - } else if (value.getSysConstant().equals(ConstValue.SystemConsts.NOT_NULL)) { - return QueryBuilders.boolQuery() - .mustNot( - QueryBuilders.boolQuery() - .must(QueryBuilders.matchAllQuery()) - .must(QueryBuilders.existsQuery(name.getName()))); - } - } else if (op.getOperator().equals(Operators.LESS_THAN.value())) { - return QueryBuilders.rangeQuery(name.getName()) - .to(value.getValue()) - .includeLower(false) - .includeUpper(false); - } else if (op.getOperator().equals(Operators.STARTS_WITH.value())) { - return QueryBuilders.prefixQuery(name.getName(), value.getUnquotedValue()); - } - - throw new IllegalStateException("Incorrect/unsupported operators"); - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractNode.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractNode.java deleted file mode 100644 index 64c765acdd..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractNode.java +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
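Each operator in getFilterBuilder above maps to a different QueryBuilder: '=' to queryStringQuery, BETWEEN to rangeQuery, IN to termsQuery, STARTS_WITH to prefixQuery, and IS null/not null to bool-plus-exists queries. A sketch exercising several of these paths through the removed parser:

    import com.netflix.conductor.es7.dao.query.parser.Expression;

    public class NameValueDemo {
        public static void main(String[] args) throws Exception {
            String[] queries = {
                "workflowId = 'abc'",                // -> queryStringQuery("workflowId:\"abc\"")
                "startTime BETWEEN 1 AND 5",         // -> rangeQuery("startTime").from("1").to("5")
                "status IN (FAILED,TIMED_OUT)",      // -> termsQuery("status", [FAILED, TIMED_OUT])
                "correlationId STARTS_WITH 'order'"  // -> prefixQuery("correlationId", "order")
            };
            for (String q : queries) {
                System.out.println(q + " -> " + Expression.fromString(q).getFilterBuilder());
            }
        }
    }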

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import java.io.InputStream; -import java.math.BigDecimal; -import java.util.HashSet; -import java.util.Set; -import java.util.regex.Pattern; - -/** - * @author Viren - */ -public abstract class AbstractNode { - - public static final Pattern WHITESPACE = Pattern.compile("\\s"); - - protected static Set comparisonOprs = new HashSet(); - - static { - comparisonOprs.add('>'); - comparisonOprs.add('<'); - comparisonOprs.add('='); - } - - protected InputStream is; - - protected AbstractNode(InputStream is) throws ParserException { - this.is = is; - this.parse(); - } - - protected boolean isNumber(String test) { - try { - // If you can convert to a big decimal value, then it is a number. - new BigDecimal(test); - return true; - - } catch (NumberFormatException e) { - // Ignore - } - return false; - } - - protected boolean isBoolOpr(byte[] buffer) { - if (buffer.length > 1 && buffer[0] == 'O' && buffer[1] == 'R') { - return true; - } else if (buffer.length > 2 && buffer[0] == 'A' && buffer[1] == 'N' && buffer[2] == 'D') { - return true; - } - return false; - } - - protected boolean isComparisonOpr(byte[] buffer) { - if (buffer[0] == 'I' && buffer[1] == 'N') { - return true; - } else if (buffer[0] == '!' && buffer[1] == '=') { - return true; - } else { - return comparisonOprs.contains((char) buffer[0]); - } - } - - protected byte[] peek(int length) throws Exception { - return read(length, true); - } - - protected byte[] read(int length) throws Exception { - return read(length, false); - } - - protected String readToken() throws Exception { - skipWhitespace(); - StringBuilder sb = new StringBuilder(); - while (is.available() > 0) { - char c = (char) peek(1)[0]; - if (c == ' ' || c == '\t' || c == '\n' || c == '\r') { - is.skip(1); - break; - } else if (c == '=' || c == '>' || c == '<' || c == '!') { - // do not skip - break; - } - sb.append(c); - is.skip(1); - } - return sb.toString().trim(); - } - - protected boolean isNumeric(char c) { - if (c == '-' || c == 'e' || (c >= '0' && c <= '9') || c == '.') { - return true; - } - return false; - } - - protected void assertExpected(byte[] found, String expected) throws ParserException { - assertExpected(new String(found), expected); - } - - protected void assertExpected(String found, String expected) throws ParserException { - if (!found.equals(expected)) { - throw new ParserException("Expected " + expected + ", found " + found); - } - } - - protected void assertExpected(char found, char expected) throws ParserException { - if (found != expected) { - throw new ParserException("Expected " + expected + ", found " + found); - } - } - - protected static void efor(int length, FunctionThrowingException consumer) - throws Exception { - for (int i = 0; i < length; i++) { - consumer.accept(i); - } - } - - protected abstract void _parse() throws Exception; - - // Public stuff here - private void parse() throws ParserException { - // skip white spaces - skipWhitespace(); - try { - _parse(); - } catch (Exception e) { - System.out.println("\t" + this.getClass().getSimpleName() + "->" + this.toString()); - if (!(e instanceof ParserException)) { - throw new ParserException("Error parsing", 
e); - } else { - throw (ParserException) e; - } - } - skipWhitespace(); - } - - // Private methods - - private byte[] read(int length, boolean peekOnly) throws Exception { - byte[] buf = new byte[length]; - if (peekOnly) { - is.mark(length); - } - efor(length, (Integer c) -> buf[c] = (byte) is.read()); - if (peekOnly) { - is.reset(); - } - return buf; - } - - protected void skipWhitespace() throws ParserException { - try { - while (is.available() > 0) { - byte c = peek(1)[0]; - if (c == ' ' || c == '\t' || c == '\n' || c == '\r') { - // skip - read(1); - } else { - break; - } - } - } catch (Exception e) { - throw new ParserException(e.getMessage(), e); - } - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/BooleanOp.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/BooleanOp.java deleted file mode 100644 index 2417c66192..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/BooleanOp.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
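The lookahead in AbstractNode.peek works by mark/reset on the underlying stream, which is why Expression.fromString wraps its input in a BufferedInputStream (plain streams may not support mark). The idiom in isolation:

    import java.io.BufferedInputStream;
    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class PeekDemo {
        // Reads `length` bytes without consuming them, as AbstractNode.peek() does.
        static byte[] peek(InputStream is, int length) throws IOException {
            byte[] buf = new byte[length];
            is.mark(length); // remember the current position
            for (int i = 0; i < length; i++) {
                buf[i] = (byte) is.read();
            }
            is.reset(); // rewind so the same bytes can be read again
            return buf;
        }

        public static void main(String[] args) throws IOException {
            InputStream is =
                    new BufferedInputStream(new ByteArrayInputStream("AND rest".getBytes()));
            System.out.println(new String(peek(is, 3))); // AND
            System.out.println((char) is.read());        // 'A', nothing was consumed
        }
    }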

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import java.io.InputStream; - -/** - * @author Viren - */ -public class BooleanOp extends AbstractNode { - - private String value; - - public BooleanOp(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] buffer = peek(3); - if (buffer.length > 1 && buffer[0] == 'O' && buffer[1] == 'R') { - this.value = "OR"; - } else if (buffer.length > 2 && buffer[0] == 'A' && buffer[1] == 'N' && buffer[2] == 'D') { - this.value = "AND"; - } else { - throw new ParserException("No valid boolean operator found..."); - } - read(this.value.length()); - } - - @Override - public String toString() { - return " " + value + " "; - } - - public String getOperator() { - return value; - } - - public boolean isAnd() { - return "AND".equals(value); - } - - public boolean isOr() { - return "OR".equals(value); - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ComparisonOp.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ComparisonOp.java deleted file mode 100644 index 0a4ea78f61..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ComparisonOp.java +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
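BooleanOp peeks up to three bytes and consumes exactly the keyword it matched, leaving the rest of the stream for the next node. A tiny check against the removed class:

    import java.io.BufferedInputStream;
    import java.io.ByteArrayInputStream;

    import com.netflix.conductor.es7.dao.query.parser.internal.BooleanOp;

    public class BooleanOpDemo {
        public static void main(String[] args) throws Exception {
            BooleanOp op = new BooleanOp(
                    new BufferedInputStream(new ByteArrayInputStream("AND x = 1".getBytes())));
            System.out.println(op.isAnd()); // true; "x = 1" is still unread
        }
    }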

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import java.io.InputStream; - -/** - * @author Viren - */ -public class ComparisonOp extends AbstractNode { - - public enum Operators { - BETWEEN("BETWEEN"), - EQUALS("="), - LESS_THAN("<"), - GREATER_THAN(">"), - IN("IN"), - NOT_EQUALS("!="), - IS("IS"), - STARTS_WITH("STARTS_WITH"); - - private final String value; - - Operators(String value) { - this.value = value; - } - - public String value() { - return value; - } - } - - static { - int max = 0; - for (Operators op : Operators.values()) { - max = Math.max(max, op.value().length()); - } - maxOperatorLength = max; - } - - private static final int maxOperatorLength; - - private static final int betweenLen = Operators.BETWEEN.value().length(); - private static final int startsWithLen = Operators.STARTS_WITH.value().length(); - - private String value; - - public ComparisonOp(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] peeked = peek(maxOperatorLength); - if (peeked[0] == '=' || peeked[0] == '>' || peeked[0] == '<') { - this.value = new String(peeked, 0, 1); - } else if (peeked[0] == 'I' && peeked[1] == 'N') { - this.value = "IN"; - } else if (peeked[0] == 'I' && peeked[1] == 'S') { - this.value = "IS"; - } else if (peeked[0] == '!' && peeked[1] == '=') { - this.value = "!="; - } else if (peeked.length >= betweenLen - && peeked[0] == 'B' - && peeked[1] == 'E' - && peeked[2] == 'T' - && peeked[3] == 'W' - && peeked[4] == 'E' - && peeked[5] == 'E' - && peeked[6] == 'N') { - this.value = Operators.BETWEEN.value(); - } else if (peeked.length == startsWithLen - && new String(peeked).equals(Operators.STARTS_WITH.value())) { - this.value = Operators.STARTS_WITH.value(); - } else { - throw new ParserException( - "Expecting an operator (=, >, <, !=, BETWEEN, IN, STARTS_WITH), but found none. Peeked=>" - + new String(peeked)); - } - - read(this.value.length()); - } - - @Override - public String toString() { - return " " + value + " "; - } - - public String getOperator() { - return value; - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ConstValue.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ConstValue.java deleted file mode 100644 index 62128d7e2a..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ConstValue.java +++ /dev/null @@ -1,140 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
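ComparisonOp peeks maxOperatorLength bytes and tests the single-character operators first; '!' on its own never matches, so "!=" is only recognized as a two-byte unit. For example:

    import java.io.BufferedInputStream;
    import java.io.ByteArrayInputStream;

    import com.netflix.conductor.es7.dao.query.parser.internal.ComparisonOp;

    public class ComparisonOpDemo {
        public static void main(String[] args) throws Exception {
            ComparisonOp op = new ComparisonOp(
                    new BufferedInputStream(new ByteArrayInputStream("!= 5".getBytes())));
            System.out.println(op.getOperator()); // !=
        }
    }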

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import java.io.InputStream; - -/** - * @author Viren Constant value can be: - *

- *
- *   1. List of values (a,b,c)
- *   2. Range of values (m AND n)
- *   3. A value (x)
- *   4. A value is either a string or a number
- *
- */ -public class ConstValue extends AbstractNode { - - public static enum SystemConsts { - NULL("null"), - NOT_NULL("not null"); - private String value; - - SystemConsts(String value) { - this.value = value; - } - - public String value() { - return value; - } - } - - private static String QUOTE = "\""; - - private Object value; - - private SystemConsts sysConsts; - - public ConstValue(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] peeked = peek(4); - String sp = new String(peeked).trim(); - // Read a constant value (number or a string) - if (peeked[0] == '"' || peeked[0] == '\'') { - this.value = readString(is); - } else if (sp.toLowerCase().startsWith("not")) { - this.value = SystemConsts.NOT_NULL.value(); - sysConsts = SystemConsts.NOT_NULL; - read(SystemConsts.NOT_NULL.value().length()); - } else if (sp.equalsIgnoreCase(SystemConsts.NULL.value())) { - this.value = SystemConsts.NULL.value(); - sysConsts = SystemConsts.NULL; - read(SystemConsts.NULL.value().length()); - } else { - this.value = readNumber(is); - } - } - - private String readNumber(InputStream is) throws Exception { - StringBuilder sb = new StringBuilder(); - while (is.available() > 0) { - is.mark(1); - char c = (char) is.read(); - if (!isNumeric(c)) { - is.reset(); - break; - } else { - sb.append(c); - } - } - String numValue = sb.toString().trim(); - return numValue; - } - /** - * Reads an escaped string - * - * @throws Exception - */ - private String readString(InputStream is) throws Exception { - char delim = (char) read(1)[0]; - StringBuilder sb = new StringBuilder(); - boolean valid = false; - while (is.available() > 0) { - char c = (char) is.read(); - if (c == delim) { - valid = true; - break; - } else if (c == '\\') { - // read the next character as part of the value - c = (char) is.read(); - sb.append(c); - } else { - sb.append(c); - } - } - if (!valid) { - throw new ParserException( - "String constant is not quoted with <" + delim + "> : " + sb.toString()); - } - return QUOTE + sb.toString() + QUOTE; - } - - public Object getValue() { - return value; - } - - @Override - public String toString() { - return "" + value; - } - - public String getUnquotedValue() { - String result = toString(); - if (result.length() >= 2 && result.startsWith(QUOTE) && result.endsWith(QUOTE)) { - result = result.substring(1, result.length() - 1); - } - return result; - } - - public boolean isSysConstant() { - return this.sysConsts != null; - } - - public SystemConsts getSysConstant() { - return this.sysConsts; - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/FunctionThrowingException.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/FunctionThrowingException.java deleted file mode 100644 index b5d48bb681..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/FunctionThrowingException.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
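ConstValue re-quotes string constants with double quotes, unwraps backslash escapes while reading, and treats the bare words null / not null as system constants. For instance:

    import java.io.BufferedInputStream;
    import java.io.ByteArrayInputStream;

    import com.netflix.conductor.es7.dao.query.parser.internal.ConstValue;

    public class ConstValueDemo {
        public static void main(String[] args) throws Exception {
            ConstValue v = new ConstValue(new BufferedInputStream(
                    new ByteArrayInputStream("'it\\'s'".getBytes())));
            System.out.println(v.getValue());         // "it's" (re-quoted)
            System.out.println(v.getUnquotedValue()); // it's

            ConstValue n = new ConstValue(new BufferedInputStream(
                    new ByteArrayInputStream("not null".getBytes())));
            System.out.println(n.isSysConstant());    // true
        }
    }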

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -/** - * @author Viren - */ -@FunctionalInterface -public interface FunctionThrowingException { - - void accept(T t) throws Exception; -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ListConst.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ListConst.java deleted file mode 100644 index a7b1ac72b1..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ListConst.java +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import java.io.InputStream; -import java.util.LinkedList; -import java.util.List; - -/** - * @author Viren List of constants - */ -public class ListConst extends AbstractNode { - - private List values; - - public ListConst(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - byte[] peeked = read(1); - assertExpected(peeked, "("); - this.values = readList(); - } - - private List readList() throws Exception { - List list = new LinkedList(); - boolean valid = false; - char c; - - StringBuilder sb = new StringBuilder(); - while (is.available() > 0) { - c = (char) is.read(); - if (c == ')') { - valid = true; - break; - } else if (c == ',') { - list.add(sb.toString().trim()); - sb = new StringBuilder(); - } else { - sb.append(c); - } - } - list.add(sb.toString().trim()); - if (!valid) { - throw new ParserException("Expected ')' but never encountered in the stream"); - } - return list; - } - - public List getList() { - return (List) values; - } - - @Override - public String toString() { - return values.toString(); - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Name.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Name.java deleted file mode 100644 index 861937a2e8..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Name.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
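ListConst reads a parenthesized, comma-separated list, trimming each element and failing if the closing ')' never appears. For example:

    import java.io.BufferedInputStream;
    import java.io.ByteArrayInputStream;

    import com.netflix.conductor.es7.dao.query.parser.internal.ListConst;

    public class ListConstDemo {
        public static void main(String[] args) throws Exception {
            ListConst list = new ListConst(new BufferedInputStream(
                    new ByteArrayInputStream("(a, b, c)".getBytes())));
            System.out.println(list.getList()); // [a, b, c]
        }
    }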

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import java.io.InputStream; - -/** - * @author Viren Represents the name of the field to be searched against. - */ -public class Name extends AbstractNode { - - private String value; - - public Name(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - this.value = readToken(); - } - - @Override - public String toString() { - return value; - } - - public String getName() { - return value; - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ParserException.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ParserException.java deleted file mode 100644 index c2bc089010..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/ParserException.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -/** - * @author Viren - */ -@SuppressWarnings("serial") -public class ParserException extends Exception { - - public ParserException(String message) { - super(message); - } - - public ParserException(String message, Throwable cause) { - super(message, cause); - } -} diff --git a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Range.java b/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Range.java deleted file mode 100644 index af0ff18601..0000000000 --- a/es7-persistence/src/main/java/com/netflix/conductor/es7/dao/query/parser/internal/Range.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import java.io.InputStream; - -/** - * @author Viren - */ -public class Range extends AbstractNode { - - private String low; - - private String high; - - public Range(InputStream is) throws ParserException { - super(is); - } - - @Override - protected void _parse() throws Exception { - this.low = readNumber(is); - - skipWhitespace(); - byte[] peeked = read(3); - assertExpected(peeked, "AND"); - skipWhitespace(); - - String num = readNumber(is); - if (num == null || "".equals(num)) { - throw new ParserException("Missing the upper range value..."); - } - this.high = num; - } - - private String readNumber(InputStream is) throws Exception { - StringBuilder sb = new StringBuilder(); - while (is.available() > 0) { - is.mark(1); - char c = (char) is.read(); - if (!isNumeric(c)) { - is.reset(); - break; - } else { - sb.append(c); - } - } - String numValue = sb.toString().trim(); - return numValue; - } - - /** - * @return the low - */ - public String getLow() { - return low; - } - - /** - * @return the high - */ - public String getHigh() { - return high; - } - - @Override - public String toString() { - return low + " AND " + high; - } -} diff --git a/es7-persistence/src/main/resources/mappings_docType_task.json b/es7-persistence/src/main/resources/mappings_docType_task.json deleted file mode 100644 index 3d102a013d..0000000000 --- a/es7-persistence/src/main/resources/mappings_docType_task.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "properties": { - "correlationId": { - "type": "keyword", - "index": true - }, - "endTime": { - "type": "date", - "format": "strict_date_optional_time||epoch_millis" - }, - "executionTime": { - "type": "long" - }, - "input": { - "type": "text", - "index": true - }, - "output": { - "type": "text", - "index": true - }, - "queueWaitTime": { - "type": "long" - }, - "reasonForIncompletion": { - "type": "keyword", - "index": true - }, - "scheduledTime": { - "type": "date", - "format": "strict_date_optional_time||epoch_millis" - }, - "startTime": { - "type": "date", - "format": "strict_date_optional_time||epoch_millis" - }, - "status": { - "type": "keyword", - "index": true - }, - "taskDefName": { - "type": "keyword", - "index": true - }, - "taskId": { - "type": "keyword", - "index": true - }, - "taskType": { - "type": "keyword", - "index": true - }, - "updateTime": { - "type": "date", - "format": "strict_date_optional_time||epoch_millis" - }, - "workflowId": { - "type": "keyword", - "index": true - }, - "workflowType": { - "type": "keyword", - "index": true - } - } -} diff --git a/es7-persistence/src/main/resources/mappings_docType_workflow.json b/es7-persistence/src/main/resources/mappings_docType_workflow.json deleted file mode 100644 index 51adac6317..0000000000 --- a/es7-persistence/src/main/resources/mappings_docType_workflow.json +++ /dev/null @@ -1,72 +0,0 @@ -{ - "properties": { - "correlationId": { - "type": "keyword", - "index": true, - "doc_values": true - }, - "endTime": { - "type": "date", - "format": "strict_date_optional_time||epoch_millis", - "doc_values": true - }, - "executionTime": { - "type": "long", - "doc_values": true - }, - "failedReferenceTaskNames": { - "type": "text", - 
"index": false - }, - "input": { - "type": "text", - "index": true - }, - "output": { - "type": "text", - "index": true - }, - "reasonForIncompletion": { - "type": "keyword", - "index": true, - "doc_values": true - }, - "startTime": { - "type": "date", - "format": "strict_date_optional_time||epoch_millis", - "doc_values": true - }, - "status": { - "type": "keyword", - "index": true, - "doc_values": true - }, - "updateTime": { - "type": "date", - "format": "strict_date_optional_time||epoch_millis", - "doc_values": true - }, - "version": { - "type": "long", - "doc_values": true - }, - "workflowId": { - "type": "keyword", - "index": true, - "doc_values": true - }, - "workflowType": { - "type": "keyword", - "index": true, - "doc_values": true - }, - "rawJSON": { - "type": "text", - "index": false - }, - "event": { - "type": "keyword", - "index": true - } - } -} diff --git a/es7-persistence/src/main/resources/template_event.json b/es7-persistence/src/main/resources/template_event.json deleted file mode 100644 index 3a01503204..0000000000 --- a/es7-persistence/src/main/resources/template_event.json +++ /dev/null @@ -1,48 +0,0 @@ -{ - "index_patterns": [ "*event*" ], - "template": { - "settings": { - "refresh_interval": "1s" - }, - "mappings": { - "properties": { - "action": { - "type": "keyword", - "index": true - }, - "created": { - "type": "long" - }, - "event": { - "type": "keyword", - "index": true - }, - "id": { - "type": "keyword", - "index": true - }, - "messageId": { - "type": "keyword", - "index": true - }, - "name": { - "type": "keyword", - "index": true - }, - "output": { - "properties": { - "workflowId": { - "type": "keyword", - "index": true - } - } - }, - "status": { - "type": "keyword", - "index": true - } - } - }, - "aliases" : { } - } -} diff --git a/es7-persistence/src/main/resources/template_message.json b/es7-persistence/src/main/resources/template_message.json deleted file mode 100644 index 63d571aeab..0000000000 --- a/es7-persistence/src/main/resources/template_message.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "index_patterns": [ "*message*" ], - "template": { - "settings": { - "refresh_interval": "1s" - }, - "mappings": { - "properties": { - "created": { - "type": "long" - }, - "messageId": { - "type": "keyword", - "index": true - }, - "payload": { - "type": "keyword", - "index": true - }, - "queue": { - "type": "keyword", - "index": true - } - } - }, - "aliases": { } - } -} diff --git a/es7-persistence/src/main/resources/template_task_log.json b/es7-persistence/src/main/resources/template_task_log.json deleted file mode 100644 index f7ec4bff01..0000000000 --- a/es7-persistence/src/main/resources/template_task_log.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "index_patterns": [ "*task*log*" ], - "template": { - "settings": { - "refresh_interval": "1s" - }, - "mappings": { - "properties": { - "createdTime": { - "type": "long" - }, - "log": { - "type": "keyword", - "index": true - }, - "taskId": { - "type": "keyword", - "index": true - } - } - }, - "aliases": { } - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDaoBaseTest.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDaoBaseTest.java deleted file mode 100644 index 97829ddf7c..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchRestDaoBaseTest.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.index; - -import java.io.BufferedReader; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.Reader; - -import org.apache.http.HttpHost; -import org.elasticsearch.client.Request; -import org.elasticsearch.client.Response; -import org.elasticsearch.client.RestClient; -import org.elasticsearch.client.RestClientBuilder; -import org.junit.After; -import org.junit.Before; -import org.springframework.retry.support.RetryTemplate; - -public abstract class ElasticSearchRestDaoBaseTest extends ElasticSearchTest { - - protected RestClient restClient; - protected ElasticSearchRestDAOV7 indexDAO; - - @Before - public void setup() throws Exception { - String httpHostAddress = container.getHttpHostAddress(); - String host = httpHostAddress.split(":")[0]; - int port = Integer.parseInt(httpHostAddress.split(":")[1]); - - properties.setUrl("http://" + httpHostAddress); - - RestClientBuilder restClientBuilder = RestClient.builder(new HttpHost(host, port, "http")); - restClient = restClientBuilder.build(); - - indexDAO = - new ElasticSearchRestDAOV7( - restClientBuilder, new RetryTemplate(), properties, objectMapper); - indexDAO.setup(); - } - - @After - public void tearDown() throws Exception { - deleteAllIndices(); - - if (restClient != null) { - restClient.close(); - } - } - - private void deleteAllIndices() throws IOException { - Response beforeResponse = restClient.performRequest(new Request("GET", "/_cat/indices")); - - Reader streamReader = new InputStreamReader(beforeResponse.getEntity().getContent()); - BufferedReader bufferedReader = new BufferedReader(streamReader); - - String line; - while ((line = bufferedReader.readLine()) != null) { - String[] fields = line.split("\\s"); - String endpoint = String.format("/%s", fields[2]); - - restClient.performRequest(new Request("DELETE", endpoint)); - } - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchTest.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchTest.java deleted file mode 100644 index 32f7151249..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/ElasticSearchTest.java +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2021 Netflix, Inc. - *
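ElasticSearchRestDaoBaseTest above does the heavy lifting for concrete suites: its @Before wires a RestClient and an ElasticSearchRestDAOV7 against the Testcontainers endpoint, and its @After drops every index by walking the /_cat/indices listing (the index name is the third whitespace-separated field of each row). A hypothetical subclass only needs test methods; everything in this sketch other than the base class and indexDAO is illustrative:

    package com.netflix.conductor.es7.dao.index;

    import java.io.IOException;

    import org.junit.Assert;
    import org.junit.Test;

    // Hypothetical concrete suite; TestElasticSearchRestDAOV7 below consumes the
    // base class the same way. By the time a @Test runs, setup() has already
    // called indexDAO.setup(), which creates the conductor_workflow index.
    public class ExampleRestDaoTest extends ElasticSearchRestDaoBaseTest {

        @Test
        public void workflowIndexExists() throws IOException {
            Assert.assertTrue(
                    "conductor_workflow should exist after setup()",
                    indexDAO.doesResourceExist("/conductor_workflow"));
        }
    }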

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.index; - -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.TestPropertySource; -import org.springframework.test.context.junit4.SpringRunner; -import org.testcontainers.elasticsearch.ElasticsearchContainer; -import org.testcontainers.utility.DockerImageName; - -import com.netflix.conductor.common.config.TestObjectMapperConfiguration; -import com.netflix.conductor.es7.config.ElasticSearchProperties; - -import com.fasterxml.jackson.databind.ObjectMapper; - -@ContextConfiguration( - classes = {TestObjectMapperConfiguration.class, ElasticSearchTest.TestConfiguration.class}) -@RunWith(SpringRunner.class) -@TestPropertySource( - properties = {"conductor.indexing.enabled=true", "conductor.elasticsearch.version=7"}) -public abstract class ElasticSearchTest { - - @Configuration - static class TestConfiguration { - - @Bean - public ElasticSearchProperties elasticSearchProperties() { - return new ElasticSearchProperties(); - } - } - - protected static final ElasticsearchContainer container = - new ElasticsearchContainer( - DockerImageName.parse("docker.elastic.co/elasticsearch/elasticsearch-oss") - .withTag("7.6.2")); // this should match the client version - - @Autowired protected ObjectMapper objectMapper; - - @Autowired protected ElasticSearchProperties properties; - - @BeforeClass - public static void startServer() { - container.start(); - } - - @AfterClass - public static void stopServer() { - container.stop(); - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestBulkRequestBuilderWrapper.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestBulkRequestBuilderWrapper.java deleted file mode 100644 index 7fe4fc866a..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestBulkRequestBuilderWrapper.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
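The abstract ElasticSearchTest above pins the elasticsearch-oss image to 7.6.2 so that the server matches the client jar on the test classpath; skew between the two is a common source of confusing failures. The same container can also be smoke-tested outside Spring; a minimal sketch, assuming only Testcontainers and the low-level REST client are on the classpath:

    import org.apache.http.HttpHost;
    import org.elasticsearch.client.Request;
    import org.elasticsearch.client.Response;
    import org.elasticsearch.client.RestClient;
    import org.testcontainers.elasticsearch.ElasticsearchContainer;
    import org.testcontainers.utility.DockerImageName;

    public class ContainerSmokeTest {
        public static void main(String[] args) throws Exception {
            try (ElasticsearchContainer es = new ElasticsearchContainer(
                    DockerImageName.parse("docker.elastic.co/elasticsearch/elasticsearch-oss")
                            .withTag("7.6.2"))) { // keep in lock-step with the ES client version
                es.start();
                String[] hostPort = es.getHttpHostAddress().split(":");
                try (RestClient client = RestClient.builder(
                        new HttpHost(hostPort[0], Integer.parseInt(hostPort[1]), "http")).build()) {
                    Response r = client.performRequest(new Request("GET", "/_cluster/health"));
                    System.out.println(r.getStatusLine()); // expect HTTP 200 once the node is up
                }
            }
        }
    }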

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.index; - -import org.elasticsearch.action.bulk.BulkRequestBuilder; -import org.elasticsearch.action.index.IndexRequest; -import org.elasticsearch.action.update.UpdateRequest; -import org.junit.Test; -import org.mockito.Mockito; - -public class TestBulkRequestBuilderWrapper { - BulkRequestBuilder builder = Mockito.mock(BulkRequestBuilder.class); - BulkRequestBuilderWrapper wrapper = new BulkRequestBuilderWrapper(builder); - - @Test(expected = Exception.class) - public void testAddNullUpdateRequest() { - wrapper.add((UpdateRequest) null); - } - - @Test(expected = Exception.class) - public void testAddNullIndexRequest() { - wrapper.add((IndexRequest) null); - } - - @Test - public void testBuilderCalls() { - IndexRequest indexRequest = new IndexRequest(); - UpdateRequest updateRequest = new UpdateRequest(); - - wrapper.add(indexRequest); - wrapper.add(updateRequest); - wrapper.numberOfActions(); - wrapper.execute(); - - Mockito.verify(builder, Mockito.times(1)).add(indexRequest); - Mockito.verify(builder, Mockito.times(1)).add(updateRequest); - Mockito.verify(builder, Mockito.times(1)).numberOfActions(); - Mockito.verify(builder, Mockito.times(1)).execute(); - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7.java deleted file mode 100644 index e055025a25..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7.java +++ /dev/null @@ -1,444 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.index; - -import java.io.IOException; -import java.text.SimpleDateFormat; -import java.util.*; -import java.util.function.Supplier; - -import org.joda.time.DateTime; -import org.junit.Test; - -import com.netflix.conductor.common.metadata.events.EventExecution; -import com.netflix.conductor.common.metadata.events.EventHandler; -import com.netflix.conductor.common.metadata.tasks.TaskExecLog; -import com.netflix.conductor.common.run.TaskSummary; -import com.netflix.conductor.common.run.Workflow.WorkflowStatus; -import com.netflix.conductor.common.run.WorkflowSummary; -import com.netflix.conductor.core.events.queue.Message; -import com.netflix.conductor.es7.utils.TestUtils; - -import com.google.common.collect.ImmutableMap; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -public class TestElasticSearchRestDAOV7 extends ElasticSearchRestDaoBaseTest { - - private static final SimpleDateFormat SIMPLE_DATE_FORMAT = new SimpleDateFormat("yyyyMMWW"); - - private static final String INDEX_PREFIX = "conductor"; - private static final String WORKFLOW_DOC_TYPE = "workflow"; - private static final String TASK_DOC_TYPE = "task"; - private static final String MSG_DOC_TYPE = "message"; - private static final String EVENT_DOC_TYPE = "event"; - private static final String LOG_DOC_TYPE = "task_log"; - - private boolean indexExists(final String index) throws IOException { - return indexDAO.doesResourceExist("/" + index); - } - - private boolean doesMappingExist(final String index, final String mappingName) - throws IOException { - return indexDAO.doesResourceExist("/" + index + "/_mapping/" + mappingName); - } - - @Test - public void assertInitialSetup() throws IOException { - SIMPLE_DATE_FORMAT.setTimeZone(TimeZone.getTimeZone("GMT")); - - String workflowIndex = INDEX_PREFIX + "_" + WORKFLOW_DOC_TYPE; - String taskIndex = INDEX_PREFIX + "_" + TASK_DOC_TYPE; - - String taskLogIndex = - INDEX_PREFIX + "_" + LOG_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date()); - String messageIndex = - INDEX_PREFIX + "_" + MSG_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date()); - String eventIndex = - INDEX_PREFIX + "_" + EVENT_DOC_TYPE + "_" + SIMPLE_DATE_FORMAT.format(new Date()); - - assertTrue("Index 'conductor_workflow' should exist", indexExists(workflowIndex)); - assertTrue("Index 'conductor_task' should exist", indexExists(taskIndex)); - - assertTrue("Index '" + taskLogIndex + "' should exist", indexExists(taskLogIndex)); - assertTrue("Index '" + messageIndex + "' should exist", indexExists(messageIndex)); - assertTrue("Index '" + eventIndex + "' should exist", indexExists(eventIndex)); - - assertTrue( - "Index template for 'message' should exist", - indexDAO.doesResourceExist("/_template/template_" + MSG_DOC_TYPE)); - assertTrue( - "Index template for 'event' should exist", - indexDAO.doesResourceExist("/_template/template_" + EVENT_DOC_TYPE)); - assertTrue( - "Index template for 'task_log' should exist", - indexDAO.doesResourceExist("/_template/template_" + LOG_DOC_TYPE)); - } - - @Test - public void shouldIndexWorkflow() { - WorkflowSummary workflowSummary = - TestUtils.loadWorkflowSnapshot(objectMapper, 
"workflow_summary"); - indexDAO.indexWorkflow(workflowSummary); - - assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary); - } - - @Test - public void shouldIndexWorkflowAsync() throws Exception { - WorkflowSummary workflowSummary = - TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary"); - indexDAO.asyncIndexWorkflow(workflowSummary).get(); - - assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary); - } - - @Test - public void shouldRemoveWorkflow() { - WorkflowSummary workflowSummary = - TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary"); - indexDAO.indexWorkflow(workflowSummary); - - // wait for workflow to be indexed - List workflows = - tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1); - assertEquals(1, workflows.size()); - - indexDAO.removeWorkflow(workflowSummary.getWorkflowId()); - - workflows = tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 0); - - assertTrue("Workflow was not removed.", workflows.isEmpty()); - } - - @Test - public void shouldAsyncRemoveWorkflow() throws Exception { - WorkflowSummary workflowSummary = - TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary"); - indexDAO.indexWorkflow(workflowSummary); - - // wait for workflow to be indexed - List workflows = - tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 1); - assertEquals(1, workflows.size()); - - indexDAO.asyncRemoveWorkflow(workflowSummary.getWorkflowId()).get(); - - workflows = tryFindResults(() -> searchWorkflows(workflowSummary.getWorkflowId()), 0); - - assertTrue("Workflow was not removed.", workflows.isEmpty()); - } - - @Test - public void shouldUpdateWorkflow() { - WorkflowSummary workflowSummary = - TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary"); - indexDAO.indexWorkflow(workflowSummary); - - indexDAO.updateWorkflow( - workflowSummary.getWorkflowId(), - new String[] {"status"}, - new Object[] {WorkflowStatus.COMPLETED}); - - workflowSummary.setStatus(WorkflowStatus.COMPLETED); - assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary); - } - - @Test - public void shouldAsyncUpdateWorkflow() throws Exception { - WorkflowSummary workflowSummary = - TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary"); - indexDAO.indexWorkflow(workflowSummary); - - indexDAO.asyncUpdateWorkflow( - workflowSummary.getWorkflowId(), - new String[] {"status"}, - new Object[] {WorkflowStatus.FAILED}) - .get(); - - workflowSummary.setStatus(WorkflowStatus.FAILED); - assertWorkflowSummary(workflowSummary.getWorkflowId(), workflowSummary); - } - - @Test - public void shouldIndexTask() { - TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary"); - indexDAO.indexTask(taskSummary); - - List tasks = tryFindResults(() -> searchTasks(taskSummary)); - - assertEquals(taskSummary.getTaskId(), tasks.get(0)); - } - - @Test - public void shouldIndexTaskAsync() throws Exception { - TaskSummary taskSummary = TestUtils.loadTaskSnapshot(objectMapper, "task_summary"); - indexDAO.asyncIndexTask(taskSummary).get(); - - List tasks = tryFindResults(() -> searchTasks(taskSummary)); - - assertEquals(taskSummary.getTaskId(), tasks.get(0)); - } - - @Test - public void shouldAddTaskExecutionLogs() { - List logs = new ArrayList<>(); - String taskId = uuid(); - logs.add(createLog(taskId, "log1")); - logs.add(createLog(taskId, "log2")); - logs.add(createLog(taskId, "log3")); - - indexDAO.addTaskExecutionLogs(logs); - - List indexedLogs = - 
tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3); - - assertEquals(3, indexedLogs.size()); - - assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs)); - } - - @Test - public void shouldAddTaskExecutionLogsAsync() throws Exception { - List logs = new ArrayList<>(); - String taskId = uuid(); - logs.add(createLog(taskId, "log1")); - logs.add(createLog(taskId, "log2")); - logs.add(createLog(taskId, "log3")); - - indexDAO.asyncAddTaskExecutionLogs(logs).get(); - - List indexedLogs = - tryFindResults(() -> indexDAO.getTaskExecutionLogs(taskId), 3); - - assertEquals(3, indexedLogs.size()); - - assertTrue("Not all logs was indexed", indexedLogs.containsAll(logs)); - } - - @Test - public void shouldAddMessage() { - String queue = "queue"; - Message message1 = new Message(uuid(), "payload1", null); - Message message2 = new Message(uuid(), "payload2", null); - - indexDAO.addMessage(queue, message1); - indexDAO.addMessage(queue, message2); - - List indexedMessages = tryFindResults(() -> indexDAO.getMessages(queue), 2); - - assertEquals(2, indexedMessages.size()); - - assertTrue( - "Not all messages was indexed", - indexedMessages.containsAll(Arrays.asList(message1, message2))); - } - - @Test - public void shouldAddEventExecution() { - String event = "event"; - EventExecution execution1 = createEventExecution(event); - EventExecution execution2 = createEventExecution(event); - - indexDAO.addEventExecution(execution1); - indexDAO.addEventExecution(execution2); - - List indexedExecutions = - tryFindResults(() -> indexDAO.getEventExecutions(event), 2); - - assertEquals(2, indexedExecutions.size()); - - assertTrue( - "Not all event executions was indexed", - indexedExecutions.containsAll(Arrays.asList(execution1, execution2))); - } - - @Test - public void shouldAsyncAddEventExecution() throws Exception { - String event = "event2"; - EventExecution execution1 = createEventExecution(event); - EventExecution execution2 = createEventExecution(event); - - indexDAO.asyncAddEventExecution(execution1).get(); - indexDAO.asyncAddEventExecution(execution2).get(); - - List indexedExecutions = - tryFindResults(() -> indexDAO.getEventExecutions(event), 2); - - assertEquals(2, indexedExecutions.size()); - - assertTrue( - "Not all event executions was indexed", - indexedExecutions.containsAll(Arrays.asList(execution1, execution2))); - } - - @Test - public void shouldAddIndexPrefixToIndexTemplate() throws Exception { - String json = TestUtils.loadJsonResource("expected_template_task_log"); - String content = indexDAO.loadTypeMappingSource("/template_task_log.json"); - - assertEquals(json, content); - } - - @Test - public void shouldSearchRecentRunningWorkflows() throws Exception { - WorkflowSummary oldWorkflow = - TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary"); - oldWorkflow.setStatus(WorkflowStatus.RUNNING); - oldWorkflow.setUpdateTime(getFormattedTime(new DateTime().minusHours(2).toDate())); - - WorkflowSummary recentWorkflow = - TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary"); - recentWorkflow.setStatus(WorkflowStatus.RUNNING); - recentWorkflow.setUpdateTime(getFormattedTime(new DateTime().minusHours(1).toDate())); - - WorkflowSummary tooRecentWorkflow = - TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary"); - tooRecentWorkflow.setStatus(WorkflowStatus.RUNNING); - tooRecentWorkflow.setUpdateTime(getFormattedTime(new DateTime().toDate())); - - indexDAO.indexWorkflow(oldWorkflow); - indexDAO.indexWorkflow(recentWorkflow); - 
indexDAO.indexWorkflow(tooRecentWorkflow); - - Thread.sleep(1000); - - List ids = indexDAO.searchRecentRunningWorkflows(2, 1); - - assertEquals(1, ids.size()); - assertEquals(recentWorkflow.getWorkflowId(), ids.get(0)); - } - - @Test - public void shouldCountWorkflows() { - int counts = 1100; - for (int i = 0; i < counts; i++) { - WorkflowSummary workflowSummary = - TestUtils.loadWorkflowSnapshot(objectMapper, "workflow_summary"); - indexDAO.indexWorkflow(workflowSummary); - } - - // wait for workflow to be indexed - long result = tryGetCount(() -> getWorkflowCount("template_workflow", "RUNNING"), counts); - assertEquals(counts, result); - } - - private long tryGetCount(Supplier countFunction, int resultsCount) { - long result = 0; - for (int i = 0; i < 20; i++) { - result = countFunction.get(); - if (result == resultsCount) { - return result; - } - try { - Thread.sleep(100); - } catch (InterruptedException e) { - throw new RuntimeException(e.getMessage(), e); - } - } - return result; - } - - // Get total workflow counts given the name and status - private long getWorkflowCount(String workflowName, String status) { - return indexDAO.getWorkflowCount( - "status=\"" + status + "\" AND workflowType=\"" + workflowName + "\"", "*"); - } - - private void assertWorkflowSummary(String workflowId, WorkflowSummary summary) { - assertEquals(summary.getWorkflowType(), indexDAO.get(workflowId, "workflowType")); - assertEquals(String.valueOf(summary.getVersion()), indexDAO.get(workflowId, "version")); - assertEquals(summary.getWorkflowId(), indexDAO.get(workflowId, "workflowId")); - assertEquals(summary.getCorrelationId(), indexDAO.get(workflowId, "correlationId")); - assertEquals(summary.getStartTime(), indexDAO.get(workflowId, "startTime")); - assertEquals(summary.getUpdateTime(), indexDAO.get(workflowId, "updateTime")); - assertEquals(summary.getEndTime(), indexDAO.get(workflowId, "endTime")); - assertEquals(summary.getStatus().name(), indexDAO.get(workflowId, "status")); - assertEquals(summary.getInput(), indexDAO.get(workflowId, "input")); - assertEquals(summary.getOutput(), indexDAO.get(workflowId, "output")); - assertEquals( - summary.getReasonForIncompletion(), - indexDAO.get(workflowId, "reasonForIncompletion")); - assertEquals( - String.valueOf(summary.getExecutionTime()), - indexDAO.get(workflowId, "executionTime")); - assertEquals(summary.getEvent(), indexDAO.get(workflowId, "event")); - assertEquals( - summary.getFailedReferenceTaskNames(), - indexDAO.get(workflowId, "failedReferenceTaskNames")); - } - - private String getFormattedTime(Date time) { - SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"); - sdf.setTimeZone(TimeZone.getTimeZone("GMT")); - return sdf.format(time); - } - - private List tryFindResults(Supplier> searchFunction) { - return tryFindResults(searchFunction, 1); - } - - private List tryFindResults(Supplier> searchFunction, int resultsCount) { - List result = Collections.emptyList(); - for (int i = 0; i < 20; i++) { - result = searchFunction.get(); - if (result.size() == resultsCount) { - return result; - } - try { - Thread.sleep(100); - } catch (InterruptedException e) { - throw new RuntimeException(e.getMessage(), e); - } - } - return result; - } - - private List searchWorkflows(String workflowId) { - return indexDAO.searchWorkflows( - "", "workflowId:\"" + workflowId + "\"", 0, 100, Collections.emptyList()) - .getResults(); - } - - private List searchTasks(TaskSummary taskSummary) { - return indexDAO.searchTasks( - "", - "workflowId:\"" + 
taskSummary.getWorkflowId() + "\"", - 0, - 100, - Collections.emptyList()) - .getResults(); - } - - private TaskExecLog createLog(String taskId, String log) { - TaskExecLog taskExecLog = new TaskExecLog(log); - taskExecLog.setTaskId(taskId); - return taskExecLog; - } - - private EventExecution createEventExecution(String event) { - EventExecution execution = new EventExecution(uuid(), uuid()); - execution.setName("name"); - execution.setEvent(event); - execution.setCreated(System.currentTimeMillis()); - execution.setStatus(EventExecution.Status.COMPLETED); - execution.setAction(EventHandler.Action.Type.start_workflow); - execution.setOutput(ImmutableMap.of("a", 1, "b", 2, "c", 3)); - return execution; - } - - private String uuid() { - return UUID.randomUUID().toString(); - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7Batch.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7Batch.java deleted file mode 100644 index 81b5971dff..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/index/TestElasticSearchRestDAOV7Batch.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
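None of the assertions above run directly after a write: Elasticsearch is near-real-time, so documents only become searchable after a refresh (the deleted templates configure a refresh_interval of 1s), which is why the suite funnels every read through tryFindResults/tryGetCount. The same polling idea in a standalone, reusable form; the class and method names here are illustrative:

    import java.util.Collections;
    import java.util.List;
    import java.util.function.Supplier;

    public final class Eventually {

        // Poll `search` until it returns exactly `expected` hits or ~2 seconds
        // elapse (20 attempts x 100 ms) -- the same budget tryFindResults uses above.
        public static <T> List<T> fetch(Supplier<List<T>> search, int expected) {
            List<T> result = Collections.emptyList();
            for (int i = 0; i < 20; i++) {
                result = search.get();
                if (result.size() == expected) {
                    return result;
                }
                try {
                    Thread.sleep(100);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    throw new RuntimeException(e.getMessage(), e);
                }
            }
            return result; // caller asserts on whatever was last observed
        }
    }

A test then reads as assertEquals(1, Eventually.fetch(() -> searchWorkflows(id), 1).size()) instead of sleeping for a fixed amount of time.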

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.index; - -import java.util.HashMap; -import java.util.concurrent.TimeUnit; - -import org.junit.Test; -import org.springframework.test.context.TestPropertySource; - -import com.netflix.conductor.common.metadata.tasks.Task.Status; -import com.netflix.conductor.common.run.SearchResult; -import com.netflix.conductor.common.run.TaskSummary; - -import com.fasterxml.jackson.core.JsonProcessingException; - -import static org.awaitility.Awaitility.await; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -@TestPropertySource(properties = "conductor.elasticsearch.indexBatchSize=2") -public class TestElasticSearchRestDAOV7Batch extends ElasticSearchRestDaoBaseTest { - - @Test - public void indexTaskWithBatchSizeTwo() { - String correlationId = "some-correlation-id"; - - TaskSummary taskSummary = new TaskSummary(); - taskSummary.setTaskId("some-task-id"); - taskSummary.setWorkflowId("some-workflow-instance-id"); - taskSummary.setTaskType("some-task-type"); - taskSummary.setStatus(Status.FAILED); - try { - taskSummary.setInput( - objectMapper.writeValueAsString( - new HashMap() { - { - put("input_key", "input_value"); - } - })); - } catch (JsonProcessingException e) { - throw new RuntimeException(e); - } - taskSummary.setCorrelationId(correlationId); - taskSummary.setTaskDefName("some-task-def-name"); - taskSummary.setReasonForIncompletion("some-failure-reason"); - - indexDAO.indexTask(taskSummary); - indexDAO.indexTask(taskSummary); - - await().atMost(5, TimeUnit.SECONDS) - .untilAsserted( - () -> { - SearchResult result = - indexDAO.searchTasks( - "correlationId='" + correlationId + "'", - "*", - 0, - 10000, - null); - - assertTrue( - "should return 1 or more search results", - result.getResults().size() > 0); - assertEquals( - "taskId should match the indexed task", - "some-task-id", - result.getResults().get(0)); - }); - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestExpression.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestExpression.java deleted file mode 100644 index e6bf608ea3..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestExpression.java +++ /dev/null @@ -1,149 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
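The batch test above leans on two behaviors worth spelling out: with conductor.elasticsearch.indexBatchSize=2 the DAO buffers the first indexTask call and only flushes the bulk request once the second arrives, and Awaitility's untilAsserted retries the whole assertion block until it stops throwing or the 5-second budget runs out. A self-contained sketch of that retry semantics, with a background thread standing in for the asynchronous bulk flush (all names illustrative):

    import static org.awaitility.Awaitility.await;

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class AwaitSketch {
        public static void main(String[] args) {
            AtomicInteger indexed = new AtomicInteger();
            // Stand-in for the async bulk flush: a worker "indexes" two docs shortly.
            new Thread(() -> { sleep(300); indexed.set(2); }).start();

            // Same shape as the test above: rerun the assertion until it passes
            // or five seconds elapse.
            await().atMost(5, TimeUnit.SECONDS)
                   .untilAsserted(() -> {
                       if (indexed.get() < 2) {
                           throw new AssertionError("batch not flushed yet");
                       }
                   });
            System.out.println("both documents visible");
        }

        private static void sleep(long ms) {
            try {
                Thread.sleep(ms);
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            }
        }
    }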

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser; - -import java.io.BufferedInputStream; -import java.io.ByteArrayInputStream; -import java.io.InputStream; - -import org.junit.Test; - -import com.netflix.conductor.es7.dao.query.parser.internal.AbstractParserTest; -import com.netflix.conductor.es7.dao.query.parser.internal.ConstValue; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; - -/** - * @author Viren - */ -public class TestExpression extends AbstractParserTest { - - @Test - public void test() throws Exception { - String test = - "type='IMAGE' AND subType ='sdp' AND (metadata.width > 50 OR metadata.height > 50)"; - // test = "type='IMAGE' AND subType ='sdp'"; - // test = "(metadata.type = 'IMAGE')"; - InputStream is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes())); - Expression expr = new Expression(is); - - System.out.println(expr); - - assertTrue(expr.isBinaryExpr()); - assertNull(expr.getGroupedExpression()); - assertNotNull(expr.getNameValue()); - - NameValue nv = expr.getNameValue(); - assertEquals("type", nv.getName().getName()); - assertEquals("=", nv.getOp().getOperator()); - assertEquals("\"IMAGE\"", nv.getValue().getValue()); - - Expression rhs = expr.getRightHandSide(); - assertNotNull(rhs); - assertTrue(rhs.isBinaryExpr()); - - nv = rhs.getNameValue(); - assertNotNull(nv); // subType = sdp - assertNull(rhs.getGroupedExpression()); - assertEquals("subType", nv.getName().getName()); - assertEquals("=", nv.getOp().getOperator()); - assertEquals("\"sdp\"", nv.getValue().getValue()); - - assertEquals("AND", rhs.getOperator().getOperator()); - rhs = rhs.getRightHandSide(); - assertNotNull(rhs); - assertFalse(rhs.isBinaryExpr()); - GroupedExpression ge = rhs.getGroupedExpression(); - assertNotNull(ge); - expr = ge.getExpression(); - assertNotNull(expr); - - assertTrue(expr.isBinaryExpr()); - nv = expr.getNameValue(); - assertNotNull(nv); - assertEquals("metadata.width", nv.getName().getName()); - assertEquals(">", nv.getOp().getOperator()); - assertEquals("50", nv.getValue().getValue()); - - assertEquals("OR", expr.getOperator().getOperator()); - rhs = expr.getRightHandSide(); - assertNotNull(rhs); - assertFalse(rhs.isBinaryExpr()); - nv = rhs.getNameValue(); - assertNotNull(nv); - - assertEquals("metadata.height", nv.getName().getName()); - assertEquals(">", nv.getOp().getOperator()); - assertEquals("50", nv.getValue().getValue()); - } - - @Test - public void testWithSysConstants() throws Exception { - String test = "type='IMAGE' AND subType ='sdp' AND description IS null"; - InputStream is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes())); - Expression expr = new Expression(is); - - System.out.println(expr); - - assertTrue(expr.isBinaryExpr()); - assertNull(expr.getGroupedExpression()); - assertNotNull(expr.getNameValue()); - - NameValue nv = expr.getNameValue(); - assertEquals("type", nv.getName().getName()); - assertEquals("=", nv.getOp().getOperator()); - assertEquals("\"IMAGE\"", nv.getValue().getValue()); - - Expression rhs = 
expr.getRightHandSide(); - assertNotNull(rhs); - assertTrue(rhs.isBinaryExpr()); - - nv = rhs.getNameValue(); - assertNotNull(nv); // subType = sdp - assertNull(rhs.getGroupedExpression()); - assertEquals("subType", nv.getName().getName()); - assertEquals("=", nv.getOp().getOperator()); - assertEquals("\"sdp\"", nv.getValue().getValue()); - - assertEquals("AND", rhs.getOperator().getOperator()); - rhs = rhs.getRightHandSide(); - assertNotNull(rhs); - assertFalse(rhs.isBinaryExpr()); - GroupedExpression ge = rhs.getGroupedExpression(); - assertNull(ge); - nv = rhs.getNameValue(); - assertNotNull(nv); - assertEquals("description", nv.getName().getName()); - assertEquals("IS", nv.getOp().getOperator()); - ConstValue cv = nv.getValue(); - assertNotNull(cv); - assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NULL); - - test = "description IS not null"; - is = new BufferedInputStream(new ByteArrayInputStream(test.getBytes())); - expr = new Expression(is); - - System.out.println(expr); - nv = expr.getNameValue(); - assertNotNull(nv); - assertEquals("description", nv.getName().getName()); - assertEquals("IS", nv.getOp().getOperator()); - cv = nv.getValue(); - assertNotNull(cv); - assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NOT_NULL); - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestGroupedExpression.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestGroupedExpression.java deleted file mode 100644 index 8a0955814c..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/TestGroupedExpression.java +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
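TestExpression above doubles as documentation for the grammar the deleted parser package implemented: an expression is either name operator value or a parenthesized group, chained left-to-right with AND/OR; string constants come back re-quoted with double quotes, numbers as raw strings, and IS null / IS not null map to system constants. A short usage sketch against the removed classes; the query string itself is illustrative:

    import java.io.BufferedInputStream;
    import java.io.ByteArrayInputStream;
    import java.io.InputStream;

    import com.netflix.conductor.es7.dao.query.parser.Expression;
    import com.netflix.conductor.es7.dao.query.parser.NameValue;

    public class ExpressionSketch {
        public static void main(String[] args) throws Exception {
            String query = "status='RUNNING' AND (version > 1 OR correlationId IS null)";
            InputStream is = new BufferedInputStream(new ByteArrayInputStream(query.getBytes()));

            Expression expr = new Expression(is); // parses eagerly in the constructor
            NameValue nv = expr.getNameValue();   // left-most leaf: status = 'RUNNING'
            System.out.println(nv.getName().getName());   // -> status
            System.out.println(nv.getOp().getOperator()); // -> =
            System.out.println(nv.getValue().getValue()); // -> "RUNNING" (re-quoted)

            // The AND/OR chain continues on the right-hand side.
            System.out.println(expr.getOperator().getOperator()); // -> AND
            System.out.println(expr.getRightHandSide());          // grouped sub-expression
        }
    }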

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser; - -import org.junit.Test; - -/** - * @author Viren - */ -public class TestGroupedExpression { - - @Test - public void test() {} -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractParserTest.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractParserTest.java deleted file mode 100644 index 7286697f04..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/AbstractParserTest.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import java.io.BufferedInputStream; -import java.io.ByteArrayInputStream; -import java.io.InputStream; - -/** - * @author Viren - */ -public abstract class AbstractParserTest { - - protected InputStream getInputStream(String expression) { - return new BufferedInputStream(new ByteArrayInputStream(expression.getBytes())); - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestBooleanOp.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestBooleanOp.java deleted file mode 100644 index bfcf45e76b..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestBooleanOp.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -/** - * @author Viren - */ -public class TestBooleanOp extends AbstractParserTest { - - @Test - public void test() throws Exception { - String[] tests = new String[] {"AND", "OR"}; - for (String test : tests) { - BooleanOp name = new BooleanOp(getInputStream(test)); - String nameVal = name.getOperator(); - assertNotNull(nameVal); - assertEquals(test, nameVal); - } - } - - @Test(expected = ParserException.class) - public void testInvalid() throws Exception { - String test = "<"; - BooleanOp name = new BooleanOp(getInputStream(test)); - String nameVal = name.getOperator(); - assertNotNull(nameVal); - assertEquals(test, nameVal); - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestComparisonOp.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestComparisonOp.java deleted file mode 100644 index 0e51dfea0a..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestComparisonOp.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -/** - * @author Viren - */ -public class TestComparisonOp extends AbstractParserTest { - - @Test - public void test() throws Exception { - String[] tests = new String[] {"<", ">", "=", "!=", "IN", "BETWEEN", "STARTS_WITH"}; - for (String test : tests) { - ComparisonOp name = new ComparisonOp(getInputStream(test)); - String nameVal = name.getOperator(); - assertNotNull(nameVal); - assertEquals(test, nameVal); - } - } - - @Test(expected = ParserException.class) - public void testInvalidOp() throws Exception { - String test = "AND"; - ComparisonOp name = new ComparisonOp(getInputStream(test)); - String nameVal = name.getOperator(); - assertNotNull(nameVal); - assertEquals(test, nameVal); - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestConstValue.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestConstValue.java deleted file mode 100644 index d9f6eaf86f..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestConstValue.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import java.util.List; - -import org.junit.Test; - -import static org.junit.Assert.*; - -/** - * @author Viren - */ -public class TestConstValue extends AbstractParserTest { - - @Test - public void testStringConst() throws Exception { - String test = "'string value'"; - String expected = - test.replaceAll( - "'", "\""); // Quotes are removed but then the result is double quoted. - ConstValue cv = new ConstValue(getInputStream(test)); - assertNotNull(cv.getValue()); - assertEquals(expected, cv.getValue()); - assertTrue(cv.getValue() instanceof String); - - test = "\"string value\""; - cv = new ConstValue(getInputStream(test)); - assertNotNull(cv.getValue()); - assertEquals(expected, cv.getValue()); - assertTrue(cv.getValue() instanceof String); - } - - @Test - public void testSystemConst() throws Exception { - String test = "null"; - ConstValue cv = new ConstValue(getInputStream(test)); - assertNotNull(cv.getValue()); - assertTrue(cv.getValue() instanceof String); - assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NULL); - test = "null"; - - test = "not null"; - cv = new ConstValue(getInputStream(test)); - assertNotNull(cv.getValue()); - assertEquals(cv.getSysConstant(), ConstValue.SystemConsts.NOT_NULL); - } - - @Test(expected = ParserException.class) - public void testInvalid() throws Exception { - String test = "'string value"; - new ConstValue(getInputStream(test)); - } - - @Test - public void testNumConst() throws Exception { - String test = "12345.89"; - ConstValue cv = new ConstValue(getInputStream(test)); - assertNotNull(cv.getValue()); - assertTrue( - cv.getValue() - instanceof - String); // Numeric values are stored as string as we are just passing thru - // them to ES - assertEquals(test, cv.getValue()); - } - - @Test - public void testRange() throws Exception { - String test = "50 AND 100"; - Range range = new Range(getInputStream(test)); - assertEquals("50", range.getLow()); - assertEquals("100", range.getHigh()); - } - - @Test(expected = ParserException.class) - public void testBadRange() throws Exception { - String test = "50 AND"; - new Range(getInputStream(test)); - } - - @Test - public void testArray() throws Exception { - String test = "(1, 3, 'name', 'value2')"; - ListConst lc = new ListConst(getInputStream(test)); - List list = lc.getList(); - assertEquals(4, list.size()); - assertTrue(list.contains("1")); - assertEquals("'value2'", list.get(3)); // Values are preserved as it is... - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestName.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestName.java deleted file mode 100644 index f8a979939b..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/dao/query/parser/internal/TestName.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2016 Netflix, Inc. - *
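testRange above also closes the loop on the Range node whose deletion opens this hunk: its _parse reads a number, demands the literal AND, then reads the upper bound, throwing ParserException when that bound is missing. Standalone usage, mirroring the test:

    import java.io.BufferedInputStream;
    import java.io.ByteArrayInputStream;

    import com.netflix.conductor.es7.dao.query.parser.internal.Range;

    public class RangeSketch {
        public static void main(String[] args) throws Exception {
            // BETWEEN operand parsing as tested above: "<low> AND <high>".
            Range range = new Range(new BufferedInputStream(
                    new ByteArrayInputStream("50 AND 100".getBytes())));
            System.out.println(range.getLow());  // -> 50
            System.out.println(range.getHigh()); // -> 100
            // "50 AND" would throw ParserException: the upper bound is mandatory.
        }
    }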

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.dao.query.parser.internal; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -/** - * @author Viren - */ -public class TestName extends AbstractParserTest { - - @Test - public void test() throws Exception { - String test = "metadata.en_US.lang "; - Name name = new Name(getInputStream(test)); - String nameVal = name.getName(); - assertNotNull(nameVal); - assertEquals(test.trim(), nameVal); - } -} diff --git a/es7-persistence/src/test/java/com/netflix/conductor/es7/utils/TestUtils.java b/es7-persistence/src/test/java/com/netflix/conductor/es7/utils/TestUtils.java deleted file mode 100644 index ca41ad57e1..0000000000 --- a/es7-persistence/src/test/java/com/netflix/conductor/es7/utils/TestUtils.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.es7.utils; - -import org.apache.commons.io.Charsets; - -import com.netflix.conductor.common.run.TaskSummary; -import com.netflix.conductor.common.run.WorkflowSummary; -import com.netflix.conductor.core.utils.IDGenerator; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.io.Resources; - -public class TestUtils { - - private static final String WORKFLOW_SCENARIO_EXTENSION = ".json"; - private static final String WORKFLOW_INSTANCE_ID_PLACEHOLDER = "WORKFLOW_INSTANCE_ID"; - - public static WorkflowSummary loadWorkflowSnapshot( - ObjectMapper objectMapper, String resourceFileName) { - try { - String content = loadJsonResource(resourceFileName); - String workflowId = new IDGenerator().generate(); - content = content.replace(WORKFLOW_INSTANCE_ID_PLACEHOLDER, workflowId); - - return objectMapper.readValue(content, WorkflowSummary.class); - } catch (Exception e) { - throw new RuntimeException(e.getMessage(), e); - } - } - - public static TaskSummary loadTaskSnapshot(ObjectMapper objectMapper, String resourceFileName) { - try { - String content = loadJsonResource(resourceFileName); - String workflowId = new IDGenerator().generate(); - content = content.replace(WORKFLOW_INSTANCE_ID_PLACEHOLDER, workflowId); - - return objectMapper.readValue(content, TaskSummary.class); - } catch (Exception e) { - throw new RuntimeException(e.getMessage(), e); - } - } - - public static String loadJsonResource(String resourceFileName) { - try { - return Resources.toString( - TestUtils.class.getResource( - "/" + resourceFileName + WORKFLOW_SCENARIO_EXTENSION), - Charsets.UTF_8); - } catch (Exception e) { - throw new RuntimeException(e.getMessage(), e); - } - } -} diff --git a/es7-persistence/src/test/resources/expected_template_task_log.json b/es7-persistence/src/test/resources/expected_template_task_log.json deleted file mode 100644 index ebb8d4a202..0000000000 --- a/es7-persistence/src/test/resources/expected_template_task_log.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "index_patterns" : [ "*conductor_task*log*" ], - "template" : { - "settings" : { - "refresh_interval" : "1s" - }, - "mappings" : { - "properties" : { - "createdTime" : { - "type" : "long" - }, - "log" : { - "type" : "keyword", - "index" : true - }, - "taskId" : { - "type" : "keyword", - "index" : true - } - } - }, - "aliases" : { } - } -} \ No newline at end of file diff --git a/es7-persistence/src/test/resources/task_summary.json b/es7-persistence/src/test/resources/task_summary.json deleted file mode 100644 index a409a22f13..0000000000 --- a/es7-persistence/src/test/resources/task_summary.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "taskId": "9dea4567-0240-4eab-bde8-99f4535ea3fc", - "taskDefName": "templated_task", - "taskType": "templated_task", - "workflowId": "WORKFLOW_INSTANCE_ID", - "workflowType": "template_workflow", - "correlationId": "testTaskDefTemplate", - "scheduledTime": "2021-08-22T05:18:25.121Z", - "startTime": "0", - "endTime": "0", - "updateTime": "2021-08-23T00:18:25.121Z", - "status": "SCHEDULED", - "workflowPriority": 1, - "queueWaitTime": 0, - "executionTime": 0, - "input": "{http_request={method=GET, vipStack=test_stack, body={requestDetails={key1=value1, 
key2=42}, outputPath=s3://bucket/outputPath, inputPaths=[file://path1, file://path2]}, uri=/get/something}}" -} \ No newline at end of file diff --git a/es7-persistence/src/test/resources/workflow_summary.json b/es7-persistence/src/test/resources/workflow_summary.json deleted file mode 100644 index 443d8464eb..0000000000 --- a/es7-persistence/src/test/resources/workflow_summary.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "workflowType": "template_workflow", - "version": 1, - "workflowId": "WORKFLOW_INSTANCE_ID", - "priority": 1, - "correlationId": "testTaskDefTemplate", - "startTime": 1534983505050, - "updateTime": 1534983505131, - "endTime": 0, - "status": "RUNNING", - "input": "{path1=file://path1, path2=file://path2, requestDetails={key1=value1, key2=42}, outputPath=s3://bucket/outputPath}" -} diff --git a/grpc-server/dependencies.lock b/grpc-server/dependencies.lock index 31a913f66c..f87a774d2d 100644 --- a/grpc-server/dependencies.lock +++ b/grpc-server/dependencies.lock @@ -329,6 +329,12 @@ "com.netflix.conductor:conductor-core" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, "com.google.android:annotations": { "locked": "4.1.1.4", "transitive": [ @@ -361,6 +367,7 @@ "com.google.errorprone:error_prone_annotations": { "locked": "2.10.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava", "com.google.protobuf:protobuf-java-util", "io.grpc:grpc-api", @@ -714,6 +721,7 @@ "org.checkerframework:checker-qual": { "locked": "3.12.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava" ] }, @@ -1362,6 +1370,12 @@ "com.netflix.conductor:conductor-core" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, "com.google.android:annotations": { "locked": "4.1.1.4", "transitive": [ @@ -1400,6 +1414,7 @@ "com.google.errorprone:error_prone_annotations": { "locked": "2.10.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava", "com.google.protobuf:protobuf-java-util", "io.grpc:grpc-api", @@ -1824,6 +1839,7 @@ "org.checkerframework:checker-qual": { "locked": "3.12.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava" ] }, diff --git a/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java b/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java index 88a0142890..4a6fbfff30 100644 --- a/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java +++ b/grpc/src/main/java/com/netflix/conductor/grpc/AbstractProtoMapper.java @@ -745,45 +745,45 @@ public TaskDef fromProto(TaskDefPb.TaskDef from) { return to; } - public TaskDefPb.TaskDef.TimeoutPolicy toProto(TaskDef.TimeoutPolicy from) { - TaskDefPb.TaskDef.TimeoutPolicy to; + public TaskDefPb.TaskDef.RetryLogic toProto(TaskDef.RetryLogic from) { + TaskDefPb.TaskDef.RetryLogic to; switch (from) { - case RETRY: to = TaskDefPb.TaskDef.TimeoutPolicy.RETRY; break; - case TIME_OUT_WF: to = TaskDefPb.TaskDef.TimeoutPolicy.TIME_OUT_WF; break; - case ALERT_ONLY: to = TaskDefPb.TaskDef.TimeoutPolicy.ALERT_ONLY; break; + case FIXED: to = TaskDefPb.TaskDef.RetryLogic.FIXED; break; + case EXPONENTIAL_BACKOFF: to = TaskDefPb.TaskDef.RetryLogic.EXPONENTIAL_BACKOFF; break; + case LINEAR_BACKOFF: to = TaskDefPb.TaskDef.RetryLogic.LINEAR_BACKOFF; break; default: throw new IllegalArgumentException("Unexpected enum constant: " + 
from); } return to; } - public TaskDef.TimeoutPolicy fromProto(TaskDefPb.TaskDef.TimeoutPolicy from) { - TaskDef.TimeoutPolicy to; + public TaskDef.RetryLogic fromProto(TaskDefPb.TaskDef.RetryLogic from) { + TaskDef.RetryLogic to; switch (from) { - case RETRY: to = TaskDef.TimeoutPolicy.RETRY; break; - case TIME_OUT_WF: to = TaskDef.TimeoutPolicy.TIME_OUT_WF; break; - case ALERT_ONLY: to = TaskDef.TimeoutPolicy.ALERT_ONLY; break; + case FIXED: to = TaskDef.RetryLogic.FIXED; break; + case EXPONENTIAL_BACKOFF: to = TaskDef.RetryLogic.EXPONENTIAL_BACKOFF; break; + case LINEAR_BACKOFF: to = TaskDef.RetryLogic.LINEAR_BACKOFF; break; default: throw new IllegalArgumentException("Unexpected enum constant: " + from); } return to; } - public TaskDefPb.TaskDef.RetryLogic toProto(TaskDef.RetryLogic from) { - TaskDefPb.TaskDef.RetryLogic to; + public TaskDefPb.TaskDef.TimeoutPolicy toProto(TaskDef.TimeoutPolicy from) { + TaskDefPb.TaskDef.TimeoutPolicy to; switch (from) { - case FIXED: to = TaskDefPb.TaskDef.RetryLogic.FIXED; break; - case EXPONENTIAL_BACKOFF: to = TaskDefPb.TaskDef.RetryLogic.EXPONENTIAL_BACKOFF; break; - case LINEAR_BACKOFF: to = TaskDefPb.TaskDef.RetryLogic.LINEAR_BACKOFF; break; + case RETRY: to = TaskDefPb.TaskDef.TimeoutPolicy.RETRY; break; + case TIME_OUT_WF: to = TaskDefPb.TaskDef.TimeoutPolicy.TIME_OUT_WF; break; + case ALERT_ONLY: to = TaskDefPb.TaskDef.TimeoutPolicy.ALERT_ONLY; break; default: throw new IllegalArgumentException("Unexpected enum constant: " + from); } return to; } - public TaskDef.RetryLogic fromProto(TaskDefPb.TaskDef.RetryLogic from) { - TaskDef.RetryLogic to; + public TaskDef.TimeoutPolicy fromProto(TaskDefPb.TaskDef.TimeoutPolicy from) { + TaskDef.TimeoutPolicy to; switch (from) { - case FIXED: to = TaskDef.RetryLogic.FIXED; break; - case EXPONENTIAL_BACKOFF: to = TaskDef.RetryLogic.EXPONENTIAL_BACKOFF; break; - case LINEAR_BACKOFF: to = TaskDef.RetryLogic.LINEAR_BACKOFF; break; + case RETRY: to = TaskDef.TimeoutPolicy.RETRY; break; + case TIME_OUT_WF: to = TaskDef.TimeoutPolicy.TIME_OUT_WF; break; + case ALERT_ONLY: to = TaskDef.TimeoutPolicy.ALERT_ONLY; break; default: throw new IllegalArgumentException("Unexpected enum constant: " + from); } return to; diff --git a/grpc/src/main/proto/model/taskdef.proto b/grpc/src/main/proto/model/taskdef.proto index 43c086c9ee..dd15508d6c 100644 --- a/grpc/src/main/proto/model/taskdef.proto +++ b/grpc/src/main/proto/model/taskdef.proto @@ -8,16 +8,16 @@ option java_outer_classname = "TaskDefPb"; option go_package = "github.com/netflix/conductor/client/gogrpc/conductor/model"; message TaskDef { - enum TimeoutPolicy { - RETRY = 0; - TIME_OUT_WF = 1; - ALERT_ONLY = 2; - } enum RetryLogic { FIXED = 0; EXPONENTIAL_BACKOFF = 1; LINEAR_BACKOFF = 2; } + enum TimeoutPolicy { + RETRY = 0; + TIME_OUT_WF = 1; + ALERT_ONLY = 2; + } string name = 1; string description = 2; int32 retry_count = 3; diff --git a/postgres-external-storage/build.gradle b/http-task/build.gradle similarity index 71% rename from postgres-external-storage/build.gradle rename to http-task/build.gradle index c6d02e7908..c525897a7b 100644 --- a/postgres-external-storage/build.gradle +++ b/http-task/build.gradle @@ -10,20 +10,15 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
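The AbstractProtoMapper hunk above is purely a reordering: the generated RetryLogic and TimeoutPolicy converters swap positions to track the new enum order in taskdef.proto, while every case label and every proto tag (FIXED = 0, RETRY = 0, and so on) is unchanged, so serialized TaskDef messages stay wire-compatible. A round-trip sketch; ProtoMapper.INSTANCE and the com.netflix.conductor.proto package are assumed from the conductor-grpc module's conventions:

    import com.netflix.conductor.common.metadata.tasks.TaskDef;
    import com.netflix.conductor.grpc.ProtoMapper;
    import com.netflix.conductor.proto.TaskDefPb;

    public class EnumRoundTrip {
        public static void main(String[] args) {
            ProtoMapper mapper = ProtoMapper.INSTANCE; // assumed singleton accessor
            TaskDefPb.TaskDef.RetryLogic wire = mapper.toProto(TaskDef.RetryLogic.LINEAR_BACKOFF);
            TaskDef.RetryLogic back = mapper.fromProto(wire);
            // Method order inside AbstractProtoMapper cannot affect this invariant:
            System.out.println(back == TaskDef.RetryLogic.LINEAR_BACKOFF); // -> true
        }
    }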
*/ - dependencies { implementation project(':conductor-common') implementation project(':conductor-core') compileOnly 'org.springframework.boot:spring-boot-starter' compileOnly 'org.springframework.boot:spring-boot-starter-web' - implementation 'org.postgresql:postgresql' - implementation 'org.springframework.boot:spring-boot-starter-jdbc' - implementation 'org.flywaydb:flyway-core' - implementation "org.springdoc:springdoc-openapi-ui:${revOpenapi}" + implementation "javax.ws.rs:jsr311-api:${revJsr311Api}" testImplementation 'org.springframework.boot:spring-boot-starter-web' - testImplementation "org.testcontainers:postgresql:${revTestContainer}" - - testImplementation project(':conductor-common').sourceSets.test.output -} + testImplementation "org.testcontainers:mockserver:${revTestContainer}" + testImplementation "org.mock-server:mockserver-client-java:${revMockServerClient}" +} \ No newline at end of file diff --git a/contribs/dependencies.lock b/http-task/dependencies.lock similarity index 76% rename from contribs/dependencies.lock rename to http-task/dependencies.lock index e6f17c079b..bbe8a09a16 100644 --- a/contribs/dependencies.lock +++ b/http-task/dependencies.lock @@ -5,34 +5,6 @@ } }, "compileClasspath": { - "com.amazonaws:aws-java-sdk-core": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-kms", - "com.amazonaws:aws-java-sdk-s3", - "com.amazonaws:aws-java-sdk-sqs" - ] - }, - "com.amazonaws:aws-java-sdk-kms": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-s3" - ] - }, - "com.amazonaws:aws-java-sdk-s3": { - "locked": "1.11.86" - }, - "com.amazonaws:aws-java-sdk-sqs": { - "locked": "1.11.86" - }, - "com.amazonaws:jmespath-java": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-kms", - "com.amazonaws:aws-java-sdk-s3", - "com.amazonaws:aws-java-sdk-sqs" - ] - }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.11.4", "transitive": [ @@ -44,7 +16,6 @@ "locked": "2.11.4", "transitive": [ "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", "com.fasterxml.jackson.module:jackson-module-parameter-names" @@ -53,22 +24,12 @@ "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.11.4", "transitive": [ - "com.amazonaws:aws-java-sdk-core", - "com.amazonaws:jmespath-java", - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", "com.fasterxml.jackson.module:jackson-module-parameter-names", - "net.thisptr:jackson-jq", "org.springframework.boot:spring-boot-starter-json" ] }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { - "locked": "2.11.4", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] - }, "com.fasterxml.jackson.datatype:jackson-datatype-jdk8": { "locked": "2.11.4", "transitive": [ @@ -87,113 +48,12 @@ "org.springframework.boot:spring-boot-starter-json" ] }, - "com.github.luben:zstd-jni": { - "locked": "1.4.4-7", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, - "com.google.code.findbugs:jsr305": { - "locked": "3.0.2", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:failureaccess": { - "locked": "1.0.1", - 
"transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:guava": { - "locked": "30.0-jre" - }, - "com.google.guava:listenablefuture": { - "locked": "9999.0-empty-to-avoid-conflict-with-guava", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.j2objc:j2objc-annotations": { - "locked": "1.3", - "transitive": [ - "com.google.guava:guava" - ] - }, "com.netflix.conductor:conductor-common": { "project": true }, "com.netflix.conductor:conductor-core": { "project": true }, - "com.netflix.spectator:spectator-api": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer" - ] - }, - "com.netflix.spectator:spectator-reg-metrics3": { - "locked": "0.122.0" - }, - "com.netflix.spectator:spectator-reg-micrometer": { - "locked": "0.122.0" - }, - "com.rabbitmq:amqp-client": { - "locked": "5.14.0" - }, - "commons-codec:commons-codec": { - "locked": "1.14", - "transitive": [ - "org.apache.httpcomponents:httpclient" - ] - }, - "commons-logging:commons-logging": { - "locked": "1.2", - "transitive": [ - "com.amazonaws:aws-java-sdk-core", - "org.apache.httpcomponents:httpclient" - ] - }, - "io.dropwizard.metrics:metrics-core": { - "locked": "4.1.22", - "transitive": [ - "com.netflix.spectator:spectator-reg-metrics3" - ] - }, - "io.micrometer:micrometer-core": { - "locked": "1.5.14", - "transitive": [ - "io.micrometer:micrometer-registry-prometheus" - ] - }, - "io.micrometer:micrometer-registry-prometheus": { - "locked": "1.6.2" - }, - "io.nats:java-nats-streaming": { - "locked": "0.5.0" - }, - "io.prometheus:simpleclient": { - "locked": "0.9.0", - "transitive": [ - "io.prometheus:simpleclient_common" - ] - }, - "io.prometheus:simpleclient_common": { - "locked": "0.9.0", - "transitive": [ - "io.micrometer:micrometer-registry-prometheus" - ] - }, - "io.reactivex:rxjava": { - "locked": "1.2.2" - }, "jakarta.annotation:jakarta.annotation-api": { "locked": "1.3.5", "transitive": [ @@ -204,33 +64,6 @@ "javax.ws.rs:jsr311-api": { "locked": "1.1.1" }, - "joda-time:joda-time": { - "locked": "2.8.1", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] - }, - "net.thisptr:jackson-jq": { - "locked": "0.0.13" - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10" - }, - "org.apache.httpcomponents:httpclient": { - "locked": "4.5.13", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] - }, - "org.apache.httpcomponents:httpcore": { - "locked": "4.4.14", - "transitive": [ - "org.apache.httpcomponents:httpclient" - ] - }, - "org.apache.kafka:kafka-clients": { - "locked": "2.6.0" - }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ @@ -268,42 +101,12 @@ "org.springframework.boot:spring-boot-starter-tomcat" ] }, - "org.checkerframework:checker-qual": { - "locked": "3.5.0", - "transitive": [ - "com.google.guava:guava" - ] - }, "org.glassfish:jakarta.el": { "locked": "3.0.3", "transitive": [ "org.springframework.boot:spring-boot-starter-tomcat" ] }, - "org.hdrhistogram:HdrHistogram": { - "locked": "2.1.12", - "transitive": [ - "io.micrometer:micrometer-core" - ] - }, - "org.jruby.jcodings:jcodings": { - "locked": "1.0.43", - "transitive": [ - "org.jruby.joni:joni" - ] - }, - "org.jruby.joni:joni": { - "locked": "2.1.27", - "transitive": [ - "net.thisptr:jackson-jq" - ] - }, - "org.lz4:lz4-java": { - "locked": "1.7.1", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.slf4j:jul-to-slf4j": { "locked": "1.7.30", "transitive": [ @@ -313,9 +116,6 @@ 
"org.slf4j:slf4j-api": { "locked": "1.7.30", "transitive": [ - "com.rabbitmq:amqp-client", - "io.dropwizard.metrics:metrics-core", - "org.apache.kafka:kafka-clients", "org.apache.logging.log4j:log4j-slf4j-impl", "org.slf4j:jul-to-slf4j" ] @@ -424,54 +224,14 @@ "org.springframework.boot:spring-boot-starter-web" ] }, - "org.xerial.snappy:snappy-java": { - "locked": "1.1.7.3", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.yaml:snakeyaml": { "locked": "1.26", "transitive": [ "org.springframework.boot:spring-boot-starter" ] - }, - "software.amazon.ion:ion-java": { - "locked": "1.0.1", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] } }, "runtimeClasspath": { - "com.amazonaws:aws-java-sdk-core": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-kms", - "com.amazonaws:aws-java-sdk-s3", - "com.amazonaws:aws-java-sdk-sqs" - ] - }, - "com.amazonaws:aws-java-sdk-kms": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-s3" - ] - }, - "com.amazonaws:aws-java-sdk-s3": { - "locked": "1.11.86" - }, - "com.amazonaws:aws-java-sdk-sqs": { - "locked": "1.11.86" - }, - "com.amazonaws:jmespath-java": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-kms", - "com.amazonaws:aws-java-sdk-s3", - "com.amazonaws:aws-java-sdk-sqs" - ] - }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.11.4", "transitive": [ @@ -483,7 +243,6 @@ "locked": "2.11.4", "transitive": [ "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core" ] @@ -491,57 +250,20 @@ "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.11.4", "transitive": [ - "com.amazonaws:aws-java-sdk-core", - "com.amazonaws:jmespath-java", - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "net.thisptr:jackson-jq" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { - "locked": "2.11.4", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] - }, - "com.github.luben:zstd-jni": { - "locked": "1.4.4-7", - "transitive": [ - "org.apache.kafka:kafka-clients" + "com.netflix.conductor:conductor-core" ] }, - "com.google.code.findbugs:jsr305": { - "locked": "3.0.2", + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", "transitive": [ - "com.google.guava:guava" + "com.netflix.conductor:conductor-core" ] }, "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:failureaccess": { - "locked": "1.0.1", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.guava:guava": { - "locked": "30.0-jre" - }, - "com.google.guava:listenablefuture": { - "locked": "9999.0-empty-to-avoid-conflict-with-guava", - "transitive": [ - "com.google.guava:guava" - ] - }, - "com.google.j2objc:j2objc-annotations": { - "locked": "1.3", + "locked": "2.4.0", "transitive": [ - "com.google.guava:guava" + "com.github.ben-manes.caffeine:caffeine" ] }, "com.google.protobuf:protobuf-java": { @@ -575,78 +297,23 @@ "com.netflix.spectator:spectator-api": { "locked": "0.122.0", "transitive": [ - "com.netflix.conductor:conductor-core", - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer" + "com.netflix.conductor:conductor-core" ] }, - "com.netflix.spectator:spectator-reg-metrics3": { - 
"locked": "0.122.0" - }, - "com.netflix.spectator:spectator-reg-micrometer": { - "locked": "0.122.0" - }, - "com.rabbitmq:amqp-client": { - "locked": "5.14.0" - }, "com.spotify:completable-futures": { "locked": "0.3.3", "transitive": [ "com.netflix.conductor:conductor-core" ] }, - "commons-codec:commons-codec": { - "locked": "1.14", - "transitive": [ - "org.apache.httpcomponents:httpclient" - ] - }, "commons-io:commons-io": { "locked": "2.7", "transitive": [ "com.netflix.conductor:conductor-core" ] }, - "commons-logging:commons-logging": { - "locked": "1.2", - "transitive": [ - "com.amazonaws:aws-java-sdk-core", - "org.apache.httpcomponents:httpclient" - ] - }, - "io.dropwizard.metrics:metrics-core": { - "locked": "4.1.22", - "transitive": [ - "com.netflix.spectator:spectator-reg-metrics3" - ] - }, - "io.micrometer:micrometer-core": { - "locked": "1.5.14", - "transitive": [ - "com.netflix.spectator:spectator-reg-micrometer", - "io.micrometer:micrometer-registry-prometheus" - ] - }, - "io.micrometer:micrometer-registry-prometheus": { - "locked": "1.6.2" - }, - "io.nats:java-nats-streaming": { - "locked": "0.5.0" - }, - "io.prometheus:simpleclient": { - "locked": "0.9.0", - "transitive": [ - "io.prometheus:simpleclient_common" - ] - }, - "io.prometheus:simpleclient_common": { - "locked": "0.9.0", - "transitive": [ - "io.micrometer:micrometer-registry-prometheus" - ] - }, "io.reactivex:rxjava": { - "locked": "1.2.2", + "locked": "1.3.8", "transitive": [ "com.netflix.conductor:conductor-core" ] @@ -667,12 +334,6 @@ "javax.ws.rs:jsr311-api": { "locked": "1.1.1" }, - "joda-time:joda-time": { - "locked": "2.8.1", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] - }, "net.minidev:accessors-smart": { "locked": "2.3.1", "transitive": [ @@ -685,9 +346,6 @@ "com.jayway.jsonpath:json-path" ] }, - "net.thisptr:jackson-jq": { - "locked": "0.0.13" - }, "org.apache.bval:bval-jsr": { "locked": "2.0.5", "transitive": [ @@ -702,21 +360,6 @@ "com.netflix.conductor:conductor-core" ] }, - "org.apache.httpcomponents:httpclient": { - "locked": "4.5.13", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] - }, - "org.apache.httpcomponents:httpcore": { - "locked": "4.4.14", - "transitive": [ - "org.apache.httpcomponents:httpclient" - ] - }, - "org.apache.kafka:kafka-clients": { - "locked": "2.6.0" - }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ @@ -764,39 +407,9 @@ ] }, "org.checkerframework:checker-qual": { - "locked": "3.5.0", - "transitive": [ - "com.google.guava:guava" - ] - }, - "org.hdrhistogram:HdrHistogram": { - "locked": "2.1.12", - "transitive": [ - "io.micrometer:micrometer-core" - ] - }, - "org.jruby.jcodings:jcodings": { - "locked": "1.0.43", - "transitive": [ - "org.jruby.joni:joni" - ] - }, - "org.jruby.joni:joni": { - "locked": "2.1.27", - "transitive": [ - "net.thisptr:jackson-jq" - ] - }, - "org.latencyutils:LatencyUtils": { - "locked": "2.0.3", - "transitive": [ - "io.micrometer:micrometer-core" - ] - }, - "org.lz4:lz4-java": { - "locked": "1.7.1", + "locked": "3.8.0", "transitive": [ - "org.apache.kafka:kafka-clients" + "com.github.ben-manes.caffeine:caffeine" ] }, "org.ow2.asm:asm": { @@ -810,56 +423,11 @@ "transitive": [ "com.jayway.jsonpath:json-path", "com.netflix.spectator:spectator-api", - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer", - "com.rabbitmq:amqp-client", - "io.dropwizard.metrics:metrics-core", - "org.apache.kafka:kafka-clients", "org.apache.logging.log4j:log4j-slf4j-impl" ] - }, - 
"org.xerial.snappy:snappy-java": { - "locked": "1.1.7.3", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, - "software.amazon.ion:ion-java": { - "locked": "1.0.1", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] } }, "testCompileClasspath": { - "com.amazonaws:aws-java-sdk-core": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-kms", - "com.amazonaws:aws-java-sdk-s3", - "com.amazonaws:aws-java-sdk-sqs" - ] - }, - "com.amazonaws:aws-java-sdk-kms": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-s3" - ] - }, - "com.amazonaws:aws-java-sdk-s3": { - "locked": "1.11.86" - }, - "com.amazonaws:aws-java-sdk-sqs": { - "locked": "1.11.86" - }, - "com.amazonaws:jmespath-java": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-kms", - "com.amazonaws:aws-java-sdk-s3", - "com.amazonaws:aws-java-sdk-sqs" - ] - }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.11.4", "transitive": [ @@ -878,7 +446,6 @@ "locked": "2.11.4", "transitive": [ "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", @@ -889,27 +456,20 @@ "com.fasterxml.jackson.core:jackson-databind": { "locked": "2.11.4", "transitive": [ - "com.amazonaws:aws-java-sdk-core", - "com.amazonaws:jmespath-java", - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", "com.fasterxml.jackson.module:jackson-module-parameter-names", + "com.github.java-json-tools:json-patch", + "com.github.java-json-tools:json-schema-core", + "com.networknt:json-schema-validator", "io.swagger.core.v3:swagger-core", "io.swagger.parser.v3:swagger-parser-v3", "io.swagger:swagger-core", - "net.thisptr:jackson-jq", "org.mock-server:mockserver-core", "org.springframework.boot:spring-boot-starter-json" ] }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { - "locked": "2.11.4", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] - }, "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": { "locked": "2.11.4", "transitive": [ @@ -970,6 +530,7 @@ "com.github.java-json-tools:jackson-coreutils": { "locked": "2.0", "transitive": [ + "com.github.java-json-tools:json-patch", "com.github.java-json-tools:json-schema-core" ] }, @@ -980,6 +541,12 @@ "com.github.java-json-tools:json-schema-validator" ] }, + "com.github.java-json-tools:json-patch": { + "locked": "1.13", + "transitive": [ + "io.swagger:swagger-compat-spec-parser" + ] + }, "com.github.java-json-tools:json-schema-core": { "locked": "1.2.14", "transitive": [ @@ -989,13 +556,13 @@ "com.github.java-json-tools:json-schema-validator": { "locked": "2.2.14", "transitive": [ - "io.swagger:swagger-compat-spec-parser", - "org.mock-server:mockserver-core" + "io.swagger:swagger-compat-spec-parser" ] }, "com.github.java-json-tools:msg-simple": { "locked": "1.2", "transitive": [ + "com.github.java-json-tools:json-patch", "com.github.java-json-tools:uri-template" ] }, @@ -1005,12 +572,6 @@ "com.github.java-json-tools:json-schema-core" ] }, - "com.github.luben:zstd-jni": { - "locked": "1.4.4-7", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "com.google.code.findbugs:jsr305": { "locked": "3.0.2", 
"transitive": [ @@ -1023,7 +584,7 @@ ] }, "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", + "locked": "2.7.1", "transitive": [ "com.google.guava:guava" ] @@ -1035,9 +596,11 @@ ] }, "com.google.guava:guava": { - "locked": "30.0-jre", + "locked": "31.0.1-android", "transitive": [ + "com.github.java-json-tools:json-schema-core", "com.github.java-json-tools:json-schema-validator", + "com.github.java-json-tools:uri-template", "io.swagger:swagger-core", "org.mock-server:mockserver-client-java", "org.mock-server:mockserver-core" @@ -1075,7 +638,7 @@ ] }, "com.lmax:disruptor": { - "locked": "3.4.2", + "locked": "3.4.4", "transitive": [ "org.mock-server:mockserver-core" ] @@ -1086,22 +649,12 @@ "com.netflix.conductor:conductor-core": { "project": true }, - "com.netflix.spectator:spectator-api": { - "locked": "0.122.0", + "com.networknt:json-schema-validator": { + "locked": "1.0.66", "transitive": [ - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer" + "org.mock-server:mockserver-core" ] }, - "com.netflix.spectator:spectator-reg-metrics3": { - "locked": "0.122.0" - }, - "com.netflix.spectator:spectator-reg-micrometer": { - "locked": "0.122.0" - }, - "com.rabbitmq:amqp-client": { - "locked": "5.14.0" - }, "com.sun.mail:mailapi": { "locked": "1.6.2", "transitive": [ @@ -1122,7 +675,7 @@ ] }, "commons-io:commons-io": { - "locked": "2.8.0", + "locked": "2.11.0", "transitive": [ "io.swagger.parser.v3:swagger-parser", "io.swagger.parser.v3:swagger-parser-v3", @@ -1130,31 +683,12 @@ "org.mock-server:mockserver-core" ] }, - "commons-logging:commons-logging": { - "locked": "1.2", + "io.github.classgraph:classgraph": { + "locked": "4.8.138", "transitive": [ - "com.amazonaws:aws-java-sdk-core", - "org.apache.httpcomponents:httpclient" - ] - }, - "io.dropwizard.metrics:metrics-core": { - "locked": "4.1.22", - "transitive": [ - "com.netflix.spectator:spectator-reg-metrics3" - ] - }, - "io.micrometer:micrometer-core": { - "locked": "1.5.14", - "transitive": [ - "io.micrometer:micrometer-registry-prometheus" + "org.mock-server:mockserver-core" ] }, - "io.micrometer:micrometer-registry-prometheus": { - "locked": "1.6.2" - }, - "io.nats:java-nats-streaming": { - "locked": "0.5.0" - }, "io.netty:netty-buffer": { "locked": "4.1.65.Final", "transitive": [ @@ -1235,35 +769,20 @@ "org.mock-server:mockserver-core" ] }, - "io.prometheus:simpleclient": { - "locked": "0.9.0", - "transitive": [ - "io.prometheus:simpleclient_common" - ] - }, - "io.prometheus:simpleclient_common": { - "locked": "0.9.0", - "transitive": [ - "io.micrometer:micrometer-registry-prometheus" - ] - }, - "io.reactivex:rxjava": { - "locked": "1.2.2" - }, "io.swagger.core.v3:swagger-annotations": { - "locked": "2.1.5", + "locked": "2.1.13", "transitive": [ "io.swagger.core.v3:swagger-core" ] }, "io.swagger.core.v3:swagger-core": { - "locked": "2.1.5", + "locked": "2.1.13", "transitive": [ "io.swagger.parser.v3:swagger-parser-v3" ] }, "io.swagger.core.v3:swagger-models": { - "locked": "2.1.5", + "locked": "2.1.13", "transitive": [ "io.swagger.core.v3:swagger-core", "io.swagger.parser.v3:swagger-parser-core", @@ -1272,45 +791,45 @@ ] }, "io.swagger.parser.v3:swagger-parser": { - "locked": "2.0.23", + "locked": "2.0.30", "transitive": [ "org.mock-server:mockserver-core" ] }, "io.swagger.parser.v3:swagger-parser-core": { - "locked": "2.0.23", + "locked": "2.0.30", "transitive": [ "io.swagger.parser.v3:swagger-parser-v2-converter", "io.swagger.parser.v3:swagger-parser-v3" ] }, 
"io.swagger.parser.v3:swagger-parser-v2-converter": { - "locked": "2.0.23", + "locked": "2.0.30", "transitive": [ "io.swagger.parser.v3:swagger-parser" ] }, "io.swagger.parser.v3:swagger-parser-v3": { - "locked": "2.0.23", + "locked": "2.0.30", "transitive": [ "io.swagger.parser.v3:swagger-parser", "io.swagger.parser.v3:swagger-parser-v2-converter" ] }, "io.swagger:swagger-annotations": { - "locked": "1.6.2", + "locked": "1.6.5", "transitive": [ "io.swagger:swagger-models" ] }, "io.swagger:swagger-compat-spec-parser": { - "locked": "1.0.52", + "locked": "1.0.57", "transitive": [ "io.swagger.parser.v3:swagger-parser-v2-converter" ] }, "io.swagger:swagger-core": { - "locked": "1.6.2", + "locked": "1.6.5", "transitive": [ "io.swagger.parser.v3:swagger-parser-v2-converter", "io.swagger:swagger-compat-spec-parser", @@ -1318,13 +837,13 @@ ] }, "io.swagger:swagger-models": { - "locked": "1.6.2", + "locked": "1.6.5", "transitive": [ "io.swagger:swagger-core" ] }, "io.swagger:swagger-parser": { - "locked": "1.0.52", + "locked": "1.0.57", "transitive": [ "io.swagger.parser.v3:swagger-parser-v2-converter", "io.swagger:swagger-compat-spec-parser" @@ -1353,40 +872,22 @@ "locked": "2.3.3", "transitive": [ "io.swagger.core.v3:swagger-core", + "org.mock-server:mockserver-core", "org.springframework.boot:spring-boot-starter-test" ] }, - "javax.activation:javax.activation-api": { - "locked": "1.2.0", - "transitive": [ - "javax.xml.bind:jaxb-api" - ] - }, "javax.servlet:javax.servlet-api": { "locked": "4.0.1", "transitive": [ "org.mock-server:mockserver-core" ] }, - "javax.validation:validation-api": { - "locked": "2.0.1.Final", - "transitive": [ - "io.swagger:swagger-core" - ] - }, "javax.ws.rs:jsr311-api": { "locked": "1.1.1" }, - "javax.xml.bind:jaxb-api": { - "locked": "2.3.1", - "transitive": [ - "org.mock-server:mockserver-core" - ] - }, "joda-time:joda-time": { "locked": "2.10.5", "transitive": [ - "com.amazonaws:aws-java-sdk-core", "com.github.java-json-tools:json-schema-validator" ] }, @@ -1417,7 +918,7 @@ ] }, "net.javacrumbs.json-unit:json-unit-core": { - "locked": "2.19.0", + "locked": "2.31.0", "transitive": [ "org.mock-server:mockserver-core" ] @@ -1440,9 +941,6 @@ "com.github.java-json-tools:json-schema-validator" ] }, - "net.thisptr:jackson-jq": { - "locked": "0.0.13" - }, "org.apache.commons:commons-compress": { "locked": "1.20", "transitive": [ @@ -1452,6 +950,7 @@ "org.apache.commons:commons-lang3": { "locked": "3.10", "transitive": [ + "com.networknt:json-schema-validator", "io.swagger.core.v3:swagger-core", "io.swagger:swagger-core", "org.apache.velocity:velocity-engine-core", @@ -1468,7 +967,6 @@ "org.apache.httpcomponents:httpclient": { "locked": "4.5.13", "transitive": [ - "com.amazonaws:aws-java-sdk-core", "io.swagger:swagger-compat-spec-parser" ] }, @@ -1478,9 +976,6 @@ "org.apache.httpcomponents:httpclient" ] }, - "org.apache.kafka:kafka-clients": { - "locked": "2.6.0" - }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ @@ -1526,14 +1021,14 @@ ] }, "org.apache.velocity:velocity-engine-core": { - "locked": "2.2", + "locked": "2.3", "transitive": [ "org.apache.velocity:velocity-engine-scripting", "org.mock-server:mockserver-core" ] }, "org.apache.velocity:velocity-engine-scripting": { - "locked": "2.2", + "locked": "2.3", "transitive": [ "org.mock-server:mockserver-core" ] @@ -1554,8 +1049,14 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "org.checkerframework:checker-compat-qual": { + "locked": "2.5.5", + "transitive": [ + 
"com.google.guava:guava" + ] + }, "org.checkerframework:checker-qual": { - "locked": "3.5.0", + "locked": "3.12.0", "transitive": [ "com.google.guava:guava" ] @@ -1580,24 +1081,6 @@ "net.javacrumbs.json-unit:json-unit-core" ] }, - "org.hdrhistogram:HdrHistogram": { - "locked": "2.1.12", - "transitive": [ - "io.micrometer:micrometer-core" - ] - }, - "org.jruby.jcodings:jcodings": { - "locked": "1.0.43", - "transitive": [ - "org.jruby.joni:joni" - ] - }, - "org.jruby.joni:joni": { - "locked": "2.1.27", - "transitive": [ - "net.thisptr:jackson-jq" - ] - }, "org.junit.jupiter:junit-jupiter": { "locked": "5.6.3", "transitive": [ @@ -1647,17 +1130,11 @@ "org.junit.vintage:junit-vintage-engine" ] }, - "org.lz4:lz4-java": { - "locked": "1.7.1", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.mock-server:mockserver-client-java": { - "locked": "5.11.2" + "locked": "5.12.0" }, "org.mock-server:mockserver-core": { - "locked": "5.11.2", + "locked": "5.12.0", "transitive": [ "org.mock-server:mockserver-client-java" ] @@ -1732,28 +1209,18 @@ "com.github.docker-java:docker-java-api", "com.github.docker-java:docker-java-transport-zerodep", "com.jayway.jsonpath:json-path", - "com.rabbitmq:amqp-client", - "io.dropwizard.metrics:metrics-core", + "com.networknt:json-schema-validator", "io.swagger.core.v3:swagger-core", "io.swagger:swagger-core", "io.swagger:swagger-models", - "io.swagger:swagger-parser", - "org.apache.kafka:kafka-clients", "org.apache.logging.log4j:log4j-slf4j-impl", "org.apache.velocity:velocity-engine-core", "org.mock-server:mockserver-client-java", "org.mock-server:mockserver-core", "org.slf4j:jul-to-slf4j", - "org.slf4j:slf4j-ext", "org.testcontainers:testcontainers" ] }, - "org.slf4j:slf4j-ext": { - "locked": "1.7.30", - "transitive": [ - "io.swagger:swagger-parser" - ] - }, "org.springframework.boot:spring-boot": { "locked": "2.3.12.RELEASE", "transitive": [ @@ -1898,12 +1365,6 @@ "org.testcontainers:mockserver" ] }, - "org.xerial.snappy:snappy-java": { - "locked": "1.1.7.3", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.xmlunit:xmlunit-core": { "locked": "2.7.0", "transitive": [ @@ -1924,43 +1385,9 @@ "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", "org.springframework.boot:spring-boot-starter" ] - }, - "software.amazon.ion:ion-java": { - "locked": "1.0.1", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] } }, "testRuntimeClasspath": { - "com.amazonaws:aws-java-sdk-core": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-kms", - "com.amazonaws:aws-java-sdk-s3", - "com.amazonaws:aws-java-sdk-sqs" - ] - }, - "com.amazonaws:aws-java-sdk-kms": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-s3" - ] - }, - "com.amazonaws:aws-java-sdk-s3": { - "locked": "1.11.86" - }, - "com.amazonaws:aws-java-sdk-sqs": { - "locked": "1.11.86" - }, - "com.amazonaws:jmespath-java": { - "locked": "1.11.86", - "transitive": [ - "com.amazonaws:aws-java-sdk-kms", - "com.amazonaws:aws-java-sdk-s3", - "com.amazonaws:aws-java-sdk-sqs" - ] - }, "com.fasterxml.jackson.core:jackson-annotations": { "locked": "2.11.4", "transitive": [ @@ -1980,7 +1407,6 @@ "locked": "2.11.4", "transitive": [ "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", @@ -1993,29 +1419,24 @@ 
"com.fasterxml.jackson.core:jackson-databind": { "locked": "2.11.4", "transitive": [ - "com.amazonaws:aws-java-sdk-core", - "com.amazonaws:jmespath-java", - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor", "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", "com.fasterxml.jackson.module:jackson-module-parameter-names", + "com.github.java-json-tools:jackson-coreutils", + "com.github.java-json-tools:jackson-coreutils-equivalence", + "com.github.java-json-tools:json-patch", + "com.github.java-json-tools:json-schema-core", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", + "com.networknt:json-schema-validator", "io.swagger.core.v3:swagger-core", "io.swagger.parser.v3:swagger-parser-v3", "io.swagger:swagger-core", - "net.thisptr:jackson-jq", "org.mock-server:mockserver-core", "org.springframework.boot:spring-boot-starter-json" ] }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-cbor": { - "locked": "2.11.4", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] - }, "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": { "locked": "2.11.4", "transitive": [ @@ -2049,6 +1470,12 @@ "org.mock-server:mockserver-core" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, "com.github.docker-java:docker-java-api": { "locked": "3.2.8", "transitive": [ @@ -2077,6 +1504,7 @@ "locked": "2.0", "transitive": [ "com.github.java-json-tools:jackson-coreutils-equivalence", + "com.github.java-json-tools:json-patch", "com.github.java-json-tools:json-schema-core" ] }, @@ -2087,6 +1515,12 @@ "com.github.java-json-tools:json-schema-validator" ] }, + "com.github.java-json-tools:json-patch": { + "locked": "1.13", + "transitive": [ + "io.swagger:swagger-compat-spec-parser" + ] + }, "com.github.java-json-tools:json-schema-core": { "locked": "1.2.14", "transitive": [ @@ -2096,14 +1530,14 @@ "com.github.java-json-tools:json-schema-validator": { "locked": "2.2.14", "transitive": [ - "io.swagger:swagger-compat-spec-parser", - "org.mock-server:mockserver-core" + "io.swagger:swagger-compat-spec-parser" ] }, "com.github.java-json-tools:msg-simple": { "locked": "1.2", "transitive": [ "com.github.java-json-tools:jackson-coreutils", + "com.github.java-json-tools:json-patch", "com.github.java-json-tools:uri-template" ] }, @@ -2113,12 +1547,6 @@ "com.github.java-json-tools:json-schema-core" ] }, - "com.github.luben:zstd-jni": { - "locked": "1.4.4-7", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "com.google.code.findbugs:jsr305": { "locked": "3.0.2", "transitive": [ @@ -2132,8 +1560,9 @@ ] }, "com.google.errorprone:error_prone_annotations": { - "locked": "2.3.4", + "locked": "2.7.1", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava" ] }, @@ -2144,9 +1573,12 @@ ] }, "com.google.guava:guava": { - "locked": "30.0-jre", + "locked": "31.0.1-android", "transitive": [ + "com.github.java-json-tools:jackson-coreutils-equivalence", + "com.github.java-json-tools:json-schema-core", "com.github.java-json-tools:json-schema-validator", + "com.github.java-json-tools:uri-template", "io.swagger:swagger-core", "org.mock-server:mockserver-client-java", "org.mock-server:mockserver-core" @@ -2192,7 +1624,7 @@ ] }, "com.lmax:disruptor": { - "locked": "3.4.2", + "locked": "3.4.4", "transitive": [ "org.mock-server:mockserver-core" ] @@ 
-2215,19 +1647,14 @@ "com.netflix.spectator:spectator-api": { "locked": "0.122.0", "transitive": [ - "com.netflix.conductor:conductor-core", - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer" + "com.netflix.conductor:conductor-core" ] }, - "com.netflix.spectator:spectator-reg-metrics3": { - "locked": "0.122.0" - }, - "com.netflix.spectator:spectator-reg-micrometer": { - "locked": "0.122.0" - }, - "com.rabbitmq:amqp-client": { - "locked": "5.14.0" + "com.networknt:json-schema-validator": { + "locked": "1.0.66", + "transitive": [ + "org.mock-server:mockserver-core" + ] }, "com.spotify:completable-futures": { "locked": "0.3.3", @@ -2235,12 +1662,30 @@ "com.netflix.conductor:conductor-core" ] }, + "com.sun.activation:jakarta.activation": { + "locked": "1.2.2", + "transitive": [ + "com.sun.xml.bind:jaxb-core" + ] + }, "com.sun.mail:mailapi": { "locked": "1.6.2", "transitive": [ "com.github.java-json-tools:json-schema-validator" ] }, + "com.sun.xml.bind:jaxb-core": { + "locked": "3.0.2", + "transitive": [ + "com.sun.xml.bind:jaxb-impl" + ] + }, + "com.sun.xml.bind:jaxb-impl": { + "locked": "3.0.2", + "transitive": [ + "org.mock-server:mockserver-core" + ] + }, "com.vaadin.external.google:android-json": { "locked": "0.0.20131108.vaadin1", "transitive": [ @@ -2255,7 +1700,7 @@ ] }, "commons-io:commons-io": { - "locked": "2.8.0", + "locked": "2.11.0", "transitive": [ "com.netflix.conductor:conductor-core", "io.swagger.parser.v3:swagger-parser", @@ -2264,32 +1709,12 @@ "org.mock-server:mockserver-core" ] }, - "commons-logging:commons-logging": { - "locked": "1.2", - "transitive": [ - "com.amazonaws:aws-java-sdk-core", - "org.apache.httpcomponents:httpclient" - ] - }, - "io.dropwizard.metrics:metrics-core": { - "locked": "4.1.22", + "io.github.classgraph:classgraph": { + "locked": "4.8.138", "transitive": [ - "com.netflix.spectator:spectator-reg-metrics3" - ] - }, - "io.micrometer:micrometer-core": { - "locked": "1.5.14", - "transitive": [ - "com.netflix.spectator:spectator-reg-micrometer", - "io.micrometer:micrometer-registry-prometheus" + "org.mock-server:mockserver-core" ] }, - "io.micrometer:micrometer-registry-prometheus": { - "locked": "1.6.2" - }, - "io.nats:java-nats-streaming": { - "locked": "0.5.0" - }, "io.netty:netty-buffer": { "locked": "4.1.65.Final", "transitive": [ @@ -2370,38 +1795,26 @@ "org.mock-server:mockserver-core" ] }, - "io.prometheus:simpleclient": { - "locked": "0.9.0", - "transitive": [ - "io.prometheus:simpleclient_common" - ] - }, - "io.prometheus:simpleclient_common": { - "locked": "0.9.0", - "transitive": [ - "io.micrometer:micrometer-registry-prometheus" - ] - }, "io.reactivex:rxjava": { - "locked": "1.2.2", + "locked": "1.3.8", "transitive": [ "com.netflix.conductor:conductor-core" ] }, "io.swagger.core.v3:swagger-annotations": { - "locked": "2.1.5", + "locked": "2.1.13", "transitive": [ "io.swagger.core.v3:swagger-core" ] }, "io.swagger.core.v3:swagger-core": { - "locked": "2.1.5", + "locked": "2.1.13", "transitive": [ "io.swagger.parser.v3:swagger-parser-v3" ] }, "io.swagger.core.v3:swagger-models": { - "locked": "2.1.5", + "locked": "2.1.13", "transitive": [ "io.swagger.core.v3:swagger-core", "io.swagger.parser.v3:swagger-parser-core", @@ -2410,45 +1823,45 @@ ] }, "io.swagger.parser.v3:swagger-parser": { - "locked": "2.0.23", + "locked": "2.0.30", "transitive": [ "org.mock-server:mockserver-core" ] }, "io.swagger.parser.v3:swagger-parser-core": { - "locked": "2.0.23", + "locked": "2.0.30", "transitive": [ 
"io.swagger.parser.v3:swagger-parser-v2-converter", "io.swagger.parser.v3:swagger-parser-v3" ] }, "io.swagger.parser.v3:swagger-parser-v2-converter": { - "locked": "2.0.23", + "locked": "2.0.30", "transitive": [ "io.swagger.parser.v3:swagger-parser" ] }, "io.swagger.parser.v3:swagger-parser-v3": { - "locked": "2.0.23", + "locked": "2.0.30", "transitive": [ "io.swagger.parser.v3:swagger-parser", "io.swagger.parser.v3:swagger-parser-v2-converter" ] }, "io.swagger:swagger-annotations": { - "locked": "1.6.2", + "locked": "1.6.5", "transitive": [ "io.swagger:swagger-models" ] }, "io.swagger:swagger-compat-spec-parser": { - "locked": "1.0.52", + "locked": "1.0.57", "transitive": [ "io.swagger.parser.v3:swagger-parser-v2-converter" ] }, "io.swagger:swagger-core": { - "locked": "1.6.2", + "locked": "1.6.5", "transitive": [ "io.swagger.parser.v3:swagger-parser-v2-converter", "io.swagger:swagger-compat-spec-parser", @@ -2456,13 +1869,13 @@ ] }, "io.swagger:swagger-models": { - "locked": "1.6.2", + "locked": "1.6.5", "transitive": [ "io.swagger:swagger-core" ] }, "io.swagger:swagger-parser": { - "locked": "1.0.52", + "locked": "1.0.57", "transitive": [ "io.swagger.parser.v3:swagger-parser-v2-converter", "io.swagger:swagger-compat-spec-parser" @@ -2492,41 +1905,24 @@ "locked": "2.3.3", "transitive": [ "com.netflix.conductor:conductor-core", + "com.sun.xml.bind:jaxb-core", "io.swagger.core.v3:swagger-core", + "org.mock-server:mockserver-core", "org.springframework.boot:spring-boot-starter-test" ] }, - "javax.activation:javax.activation-api": { - "locked": "1.2.0", - "transitive": [ - "javax.xml.bind:jaxb-api" - ] - }, "javax.servlet:javax.servlet-api": { "locked": "4.0.1", "transitive": [ "org.mock-server:mockserver-core" ] }, - "javax.validation:validation-api": { - "locked": "2.0.1.Final", - "transitive": [ - "io.swagger:swagger-core" - ] - }, "javax.ws.rs:jsr311-api": { "locked": "1.1.1" }, - "javax.xml.bind:jaxb-api": { - "locked": "2.3.1", - "transitive": [ - "org.mock-server:mockserver-core" - ] - }, "joda-time:joda-time": { "locked": "2.10.5", "transitive": [ - "com.amazonaws:aws-java-sdk-core", "com.github.java-json-tools:json-schema-validator" ] }, @@ -2557,7 +1953,7 @@ ] }, "net.javacrumbs.json-unit:json-unit-core": { - "locked": "2.19.0", + "locked": "2.31.0", "transitive": [ "org.mock-server:mockserver-core" ] @@ -2580,9 +1976,6 @@ "com.github.java-json-tools:json-schema-validator" ] }, - "net.thisptr:jackson-jq": { - "locked": "0.0.13" - }, "org.apache.bval:bval-jsr": { "locked": "2.0.5", "transitive": [ @@ -2601,6 +1994,7 @@ "transitive": [ "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", + "com.networknt:json-schema-validator", "io.swagger.core.v3:swagger-core", "io.swagger:swagger-core", "org.apache.velocity:velocity-engine-core", @@ -2617,7 +2011,6 @@ "org.apache.httpcomponents:httpclient": { "locked": "4.5.13", "transitive": [ - "com.amazonaws:aws-java-sdk-core", "io.swagger:swagger-compat-spec-parser" ] }, @@ -2627,9 +2020,6 @@ "org.apache.httpcomponents:httpclient" ] }, - "org.apache.kafka:kafka-clients": { - "locked": "2.6.0" - }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ @@ -2693,14 +2083,14 @@ ] }, "org.apache.velocity:velocity-engine-core": { - "locked": "2.2", + "locked": "2.3", "transitive": [ "org.apache.velocity:velocity-engine-scripting", "org.mock-server:mockserver-core" ] }, "org.apache.velocity:velocity-engine-scripting": { - "locked": "2.2", + "locked": "2.3", "transitive": [ 
"org.mock-server:mockserver-core" ] @@ -2722,9 +2112,16 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "org.checkerframework:checker-compat-qual": { + "locked": "2.5.5", + "transitive": [ + "com.google.guava:guava" + ] + }, "org.checkerframework:checker-qual": { - "locked": "3.5.0", + "locked": "3.12.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava" ] }, @@ -2748,24 +2145,6 @@ "net.javacrumbs.json-unit:json-unit-core" ] }, - "org.hdrhistogram:HdrHistogram": { - "locked": "2.1.12", - "transitive": [ - "io.micrometer:micrometer-core" - ] - }, - "org.jruby.jcodings:jcodings": { - "locked": "1.0.43", - "transitive": [ - "org.jruby.joni:joni" - ] - }, - "org.jruby.joni:joni": { - "locked": "2.1.27", - "transitive": [ - "net.thisptr:jackson-jq" - ] - }, "org.junit.jupiter:junit-jupiter": { "locked": "5.6.3", "transitive": [ @@ -2825,23 +2204,11 @@ "org.junit.vintage:junit-vintage-engine" ] }, - "org.latencyutils:LatencyUtils": { - "locked": "2.0.3", - "transitive": [ - "io.micrometer:micrometer-core" - ] - }, - "org.lz4:lz4-java": { - "locked": "1.7.1", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.mock-server:mockserver-client-java": { - "locked": "5.11.2" + "locked": "5.12.0" }, "org.mock-server:mockserver-core": { - "locked": "5.11.2", + "locked": "5.12.0", "transitive": [ "org.mock-server:mockserver-client-java" ] @@ -2917,30 +2284,18 @@ "com.github.docker-java:docker-java-transport-zerodep", "com.jayway.jsonpath:json-path", "com.netflix.spectator:spectator-api", - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer", - "com.rabbitmq:amqp-client", - "io.dropwizard.metrics:metrics-core", + "com.networknt:json-schema-validator", "io.swagger.core.v3:swagger-core", "io.swagger:swagger-core", "io.swagger:swagger-models", - "io.swagger:swagger-parser", - "org.apache.kafka:kafka-clients", "org.apache.logging.log4j:log4j-slf4j-impl", "org.apache.velocity:velocity-engine-core", "org.mock-server:mockserver-client-java", "org.mock-server:mockserver-core", "org.slf4j:jul-to-slf4j", - "org.slf4j:slf4j-ext", "org.testcontainers:testcontainers" ] }, - "org.slf4j:slf4j-ext": { - "locked": "1.7.30", - "transitive": [ - "io.swagger:swagger-parser" - ] - }, "org.springframework.boot:spring-boot": { "locked": "2.3.12.RELEASE", "transitive": [ @@ -3085,12 +2440,6 @@ "org.testcontainers:mockserver" ] }, - "org.xerial.snappy:snappy-java": { - "locked": "1.1.7.3", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.xmlunit:xmlunit-core": { "locked": "2.7.0", "transitive": [ @@ -3111,12 +2460,6 @@ "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", "org.springframework.boot:spring-boot-starter" ] - }, - "software.amazon.ion:ion-java": { - "locked": "1.0.1", - "transitive": [ - "com.amazonaws:aws-java-sdk-core" - ] } } } \ No newline at end of file diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/http/HttpTask.java b/http-task/src/main/java/com/netflix/conductor/tasks/http/HttpTask.java similarity index 99% rename from contribs/src/main/java/com/netflix/conductor/contribs/tasks/http/HttpTask.java rename to http-task/src/main/java/com/netflix/conductor/tasks/http/HttpTask.java index 6e46d39fb3..bcac7a9b40 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/http/HttpTask.java +++ b/http-task/src/main/java/com/netflix/conductor/tasks/http/HttpTask.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, 
either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.tasks.http; +package com.netflix.conductor.tasks.http; import java.io.IOException; import java.util.Collections; @@ -32,6 +32,7 @@ import com.netflix.conductor.core.utils.Utils; import com.netflix.conductor.model.TaskModel; import com.netflix.conductor.model.WorkflowModel; +import com.netflix.conductor.tasks.http.providers.RestTemplateProvider; import com.fasterxml.jackson.core.type.TypeReference; import com.fasterxml.jackson.databind.JsonNode; diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/http/DefaultRestTemplateProvider.java b/http-task/src/main/java/com/netflix/conductor/tasks/http/providers/DefaultRestTemplateProvider.java similarity index 91% rename from contribs/src/main/java/com/netflix/conductor/contribs/tasks/http/DefaultRestTemplateProvider.java rename to http-task/src/main/java/com/netflix/conductor/tasks/http/providers/DefaultRestTemplateProvider.java index 670f260638..d460e36f63 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/http/DefaultRestTemplateProvider.java +++ b/http-task/src/main/java/com/netflix/conductor/tasks/http/providers/DefaultRestTemplateProvider.java @@ -10,21 +10,20 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.tasks.http; +package com.netflix.conductor.tasks.http.providers; import java.time.Duration; import java.util.Optional; -import javax.annotation.Nonnull; - import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Value; import org.springframework.boot.web.client.RestTemplateBuilder; import org.springframework.http.client.HttpComponentsClientHttpRequestFactory; +import org.springframework.lang.NonNull; import org.springframework.stereotype.Component; import org.springframework.web.client.RestTemplate; -import com.netflix.conductor.contribs.tasks.http.HttpTask.Input; +import com.netflix.conductor.tasks.http.HttpTask; /** * Provider for a customized RestTemplateBuilder. This class provides a default {@link @@ -48,7 +47,7 @@ public DefaultRestTemplateProvider( } @Override - public @Nonnull RestTemplate getRestTemplate(@Nonnull Input input) { + public @NonNull RestTemplate getRestTemplate(@NonNull HttpTask.Input input) { RestTemplate restTemplate = threadLocalRestTemplate.get(); HttpComponentsClientHttpRequestFactory requestFactory = new HttpComponentsClientHttpRequestFactory(); diff --git a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/http/RestTemplateProvider.java b/http-task/src/main/java/com/netflix/conductor/tasks/http/providers/RestTemplateProvider.java similarity index 77% rename from contribs/src/main/java/com/netflix/conductor/contribs/tasks/http/RestTemplateProvider.java rename to http-task/src/main/java/com/netflix/conductor/tasks/http/providers/RestTemplateProvider.java index 95c648b079..968904d2e2 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/http/RestTemplateProvider.java +++ b/http-task/src/main/java/com/netflix/conductor/tasks/http/providers/RestTemplateProvider.java @@ -10,14 +10,15 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.tasks.http; - -import javax.annotation.Nonnull; +package com.netflix.conductor.tasks.http.providers; +import org.springframework.lang.NonNull; import org.springframework.web.client.RestTemplate; +import com.netflix.conductor.tasks.http.HttpTask; + @FunctionalInterface public interface RestTemplateProvider { - RestTemplate getRestTemplate(@Nonnull HttpTask.Input input); + RestTemplate getRestTemplate(@NonNull HttpTask.Input input); } diff --git a/http-task/src/main/resources/META-INF/additional-spring-configuration-metadata.json b/http-task/src/main/resources/META-INF/additional-spring-configuration-metadata.json new file mode 100644 index 0000000000..17c071f9aa --- /dev/null +++ b/http-task/src/main/resources/META-INF/additional-spring-configuration-metadata.json @@ -0,0 +1,14 @@ +{ + "properties": [ + { + "name": "conductor.tasks.http.readTimeout", + "type": "java.lang.Integer", + "description": "The read timeout of the underlying HttpClient used by the HTTP task." + }, + { + "name": "conductor.tasks.http.connectTimeout", + "type": "java.lang.Integer", + "description": "The connection timeout of the underlying HttpClient used by the HTTP task." + } + ] +} diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/tasks/http/HttpTaskTest.java b/http-task/src/test/java/com/netflix/conductor/tasks/http/HttpTaskTest.java similarity index 92% rename from contribs/src/test/java/com/netflix/conductor/contribs/tasks/http/HttpTaskTest.java rename to http-task/src/test/java/com/netflix/conductor/tasks/http/HttpTaskTest.java index 57db66d304..f826625560 100644 --- a/contribs/src/test/java/com/netflix/conductor/contribs/tasks/http/HttpTaskTest.java +++ b/http-task/src/test/java/com/netflix/conductor/tasks/http/HttpTaskTest.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
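With the hunks above, the provider SPI moves to com.netflix.conductor.tasks.http.providers and switches from javax.annotation.Nonnull to Spring's @NonNull, and the new configuration metadata documents conductor.tasks.http.readTimeout and conductor.tasks.http.connectTimeout for the default provider. A minimal custom implementation of the relocated RestTemplateProvider interface (the class name, package, and timeout values are illustrative only):

import org.springframework.http.client.SimpleClientHttpRequestFactory;
import org.springframework.lang.NonNull;
import org.springframework.stereotype.Component;
import org.springframework.web.client.RestTemplate;

import com.netflix.conductor.tasks.http.HttpTask;
import com.netflix.conductor.tasks.http.providers.RestTemplateProvider;

@Component
public class FixedTimeoutRestTemplateProvider implements RestTemplateProvider {

    @Override
    public @NonNull RestTemplate getRestTemplate(@NonNull HttpTask.Input input) {
        // Illustrative stand-ins for the conductor.tasks.http.connectTimeout
        // and conductor.tasks.http.readTimeout properties documented above.
        SimpleClientHttpRequestFactory factory = new SimpleClientHttpRequestFactory();
        factory.setConnectTimeout(150);
        factory.setReadTimeout(150);
        return new RestTemplate(factory);
    }
}

A bean like this can stand in for DefaultRestTemplateProvider when custom per-request configuration is needed, provided the surrounding wiring allows overriding the default.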
*/ -package com.netflix.conductor.contribs.tasks.http; +package com.netflix.conductor.tasks.http; import java.time.Duration; import java.time.Instant; @@ -19,8 +19,13 @@ import java.util.Map; import java.util.Set; -import org.junit.*; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.ClassRule; +import org.junit.Test; import org.mockserver.client.MockServerClient; +import org.mockserver.model.HttpRequest; +import org.mockserver.model.HttpResponse; import org.mockserver.model.MediaType; import org.testcontainers.containers.MockServerContainer; import org.testcontainers.utility.DockerImageName; @@ -37,6 +42,7 @@ import com.netflix.conductor.dao.MetadataDAO; import com.netflix.conductor.model.TaskModel; import com.netflix.conductor.model.WorkflowModel; +import com.netflix.conductor.tasks.http.providers.DefaultRestTemplateProvider; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.core.type.TypeReference; @@ -44,11 +50,8 @@ import static org.junit.Assert.*; import static org.mockito.Mockito.mock; -import static org.mockserver.model.HttpRequest.request; -import static org.mockserver.model.HttpResponse.response; @SuppressWarnings("unchecked") -// @Ignore // Test causes "OutOfMemoryError" error during build public class HttpTaskTest { private static final String ERROR_RESPONSE = "Something went wrong!"; @@ -64,7 +67,8 @@ public class HttpTaskTest { @ClassRule public static MockServerContainer mockServer = - new MockServerContainer(DockerImageName.parse("mockserver/mockserver")); + new MockServerContainer( + DockerImageName.parse("mockserver/mockserver").withTag("mockserver-5.12.0")); @BeforeClass public static void init() throws Exception { @@ -77,7 +81,7 @@ public static void init() throws Exception { final TypeReference> mapOfObj = new TypeReference<>() {}; MockServerClient client = new MockServerClient(mockServer.getHost(), mockServer.getServerPort()); - client.when(request().withPath("/post").withMethod("POST")) + client.when(HttpRequest.request().withPath("/post").withMethod("POST")) .respond( request -> { Map reqBody = @@ -85,25 +89,25 @@ public static void init() throws Exception { Set keys = reqBody.keySet(); Map respBody = new HashMap<>(); keys.forEach(k -> respBody.put(k, k)); - return response() + return HttpResponse.response() .withContentType(MediaType.APPLICATION_JSON) .withBody(objectMapper.writeValueAsString(respBody)); }); - client.when(request().withPath("/post2").withMethod("POST")) - .respond(response().withStatusCode(204)); - client.when(request().withPath("/failure").withMethod("GET")) + client.when(HttpRequest.request().withPath("/post2").withMethod("POST")) + .respond(HttpResponse.response().withStatusCode(204)); + client.when(HttpRequest.request().withPath("/failure").withMethod("GET")) .respond( - response() + HttpResponse.response() .withStatusCode(500) .withContentType(MediaType.TEXT_PLAIN) .withBody(ERROR_RESPONSE)); - client.when(request().withPath("/text").withMethod("GET")) - .respond(response().withBody(TEXT_RESPONSE)); - client.when(request().withPath("/numeric").withMethod("GET")) - .respond(response().withBody(String.valueOf(NUM_RESPONSE))); - client.when(request().withPath("/json").withMethod("GET")) + client.when(HttpRequest.request().withPath("/text").withMethod("GET")) + .respond(HttpResponse.response().withBody(TEXT_RESPONSE)); + client.when(HttpRequest.request().withPath("/numeric").withMethod("GET")) + .respond(HttpResponse.response().withBody(String.valueOf(NUM_RESPONSE))); + 
client.when(HttpRequest.request().withPath("/json").withMethod("GET")) .respond( - response() + HttpResponse.response() .withContentType(MediaType.APPLICATION_JSON) .withBody(JSON_RESPONSE)); } diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/tasks/http/DefaultRestTemplateProviderTest.java b/http-task/src/test/java/com/netflix/conductor/tasks/http/providers/DefaultRestTemplateProviderTest.java similarity index 92% rename from contribs/src/test/java/com/netflix/conductor/contribs/tasks/http/DefaultRestTemplateProviderTest.java rename to http-task/src/test/java/com/netflix/conductor/tasks/http/providers/DefaultRestTemplateProviderTest.java index 31e5ef2d45..816f5d6f95 100644 --- a/contribs/src/test/java/com/netflix/conductor/contribs/tasks/http/DefaultRestTemplateProviderTest.java +++ b/http-task/src/test/java/com/netflix/conductor/tasks/http/providers/DefaultRestTemplateProviderTest.java @@ -10,16 +10,16 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.tasks.http; +package com.netflix.conductor.tasks.http.providers; import java.time.Duration; import org.junit.Test; import org.springframework.web.client.RestTemplate; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertSame; +import com.netflix.conductor.tasks.http.HttpTask; + +import static org.junit.Assert.*; public class DefaultRestTemplateProviderTest { diff --git a/json-jq-task/build.gradle b/json-jq-task/build.gradle new file mode 100644 index 0000000000..24e98a6e5f --- /dev/null +++ b/json-jq-task/build.gradle @@ -0,0 +1,21 @@ +/* + * Copyright 2022 Netflix, Inc. + *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +dependencies { + implementation project(':conductor-common') + implementation project(':conductor-core') + compileOnly 'org.springframework.boot:spring-boot-starter' + + implementation "net.thisptr:jackson-jq:${revJq}" + implementation "com.github.ben-manes.caffeine:caffeine" +} diff --git a/zookeeper-lock/dependencies.lock b/json-jq-task/dependencies.lock similarity index 83% rename from zookeeper-lock/dependencies.lock rename to json-jq-task/dependencies.lock index e2c2a63e65..a22fcc57b4 100644 --- a/zookeeper-lock/dependencies.lock +++ b/json-jq-task/dependencies.lock @@ -5,64 +5,47 @@ } }, "compileClasspath": { - "com.google.guava:guava": { - "locked": "14.0.1", - "transitive": [ - "org.apache.curator:curator-client", - "org.apache.curator:curator-framework", - "org.apache.curator:curator-recipes" - ] - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", + "com.fasterxml.jackson.core:jackson-annotations": { + "locked": "2.11.4", "transitive": [ - "org.springframework.boot:spring-boot-starter" + "com.fasterxml.jackson.core:jackson-databind" ] }, - "javax.activation:activation": { - "locked": "1.1", + "com.fasterxml.jackson.core:jackson-core": { + "locked": "2.11.4", "transitive": [ - "javax.mail:mail" + "com.fasterxml.jackson.core:jackson-databind" ] }, - "javax.mail:mail": { - "locked": "1.4", + "com.fasterxml.jackson.core:jackson-databind": { + "locked": "2.11.4", "transitive": [ - "log4j:log4j" + "net.thisptr:jackson-jq" ] }, - "jline:jline": { - "locked": "0.9.94", - "transitive": [ - "org.apache.zookeeper:zookeeper" - ] + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8" }, - "log4j:log4j": { - "locked": "1.2.15", + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", "transitive": [ - "org.apache.zookeeper:zookeeper" + "com.github.ben-manes.caffeine:caffeine" ] }, - "org.apache.commons:commons-lang3": { - "locked": "3.10" + "com.netflix.conductor:conductor-common": { + "project": true }, - "org.apache.curator:curator-client": { - "locked": "2.4.0", - "transitive": [ - "org.apache.curator:curator-framework" - ] + "com.netflix.conductor:conductor-core": { + "project": true }, - "org.apache.curator:curator-framework": { - "locked": "2.4.0", + "jakarta.annotation:jakarta.annotation-api": { + "locked": "1.3.5", "transitive": [ - "org.apache.curator:curator-recipes" + "org.springframework.boot:spring-boot-starter" ] }, - "org.apache.curator:curator-recipes": { - "locked": "2.4.0" + "net.thisptr:jackson-jq": { + "locked": "0.0.13" }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", @@ -88,18 +71,22 @@ "org.apache.logging.log4j:log4j-web": { "locked": "2.17.1" }, - "org.apache.zookeeper:zookeeper": { - "locked": "3.4.5", + "org.checkerframework:checker-qual": { + "locked": "3.8.0", "transitive": [ - "org.apache.curator:curator-client", - "org.apache.curator:curator-framework", - "org.apache.curator:curator-recipes" + "com.github.ben-manes.caffeine:caffeine" ] }, - "org.jboss.netty:netty": { - "locked": "3.2.2.Final", + "org.jruby.jcodings:jcodings": { + "locked": "1.0.43", "transitive": [ - "org.apache.zookeeper:zookeeper" + 
"org.jruby.joni:joni" + ] + }, + "org.jruby.joni:joni": { + "locked": "2.1.27", + "transitive": [ + "net.thisptr:jackson-jq" ] }, "org.slf4j:jul-to-slf4j": { @@ -111,9 +98,7 @@ "org.slf4j:slf4j-api": { "locked": "1.7.30", "transitive": [ - "org.apache.curator:curator-client", "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.zookeeper:zookeeper", "org.slf4j:jul-to-slf4j" ] }, @@ -208,15 +193,20 @@ "locked": "2.11.4", "transitive": [ "com.netflix.conductor:conductor-common", + "com.netflix.conductor:conductor-core", + "net.thisptr:jackson-jq" + ] + }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ "com.netflix.conductor:conductor-core" ] }, - "com.google.guava:guava": { - "locked": "14.0.1", + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", "transitive": [ - "org.apache.curator:curator-client", - "org.apache.curator:curator-framework", - "org.apache.curator:curator-recipes" + "com.github.ben-manes.caffeine:caffeine" ] }, "com.google.protobuf:protobuf-java": { @@ -284,30 +274,6 @@ "com.netflix.conductor:conductor-core" ] }, - "javax.activation:activation": { - "locked": "1.1", - "transitive": [ - "javax.mail:mail" - ] - }, - "javax.mail:mail": { - "locked": "1.4", - "transitive": [ - "log4j:log4j" - ] - }, - "jline:jline": { - "locked": "0.9.94", - "transitive": [ - "org.apache.zookeeper:zookeeper" - ] - }, - "log4j:log4j": { - "locked": "1.2.15", - "transitive": [ - "org.apache.zookeeper:zookeeper" - ] - }, "net.minidev:accessors-smart": { "locked": "2.3.1", "transitive": [ @@ -320,6 +286,9 @@ "com.jayway.jsonpath:json-path" ] }, + "net.thisptr:jackson-jq": { + "locked": "0.0.13" + }, "org.apache.bval:bval-jsr": { "locked": "2.0.5", "transitive": [ @@ -334,21 +303,6 @@ "com.netflix.conductor:conductor-core" ] }, - "org.apache.curator:curator-client": { - "locked": "2.4.0", - "transitive": [ - "org.apache.curator:curator-framework" - ] - }, - "org.apache.curator:curator-framework": { - "locked": "2.4.0", - "transitive": [ - "org.apache.curator:curator-recipes" - ] - }, - "org.apache.curator:curator-recipes": { - "locked": "2.4.0" - }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ @@ -395,18 +349,22 @@ "com.netflix.conductor:conductor-core" ] }, - "org.apache.zookeeper:zookeeper": { - "locked": "3.4.5", + "org.checkerframework:checker-qual": { + "locked": "3.8.0", "transitive": [ - "org.apache.curator:curator-client", - "org.apache.curator:curator-framework", - "org.apache.curator:curator-recipes" + "com.github.ben-manes.caffeine:caffeine" ] }, - "org.jboss.netty:netty": { - "locked": "3.2.2.Final", + "org.jruby.jcodings:jcodings": { + "locked": "1.0.43", "transitive": [ - "org.apache.zookeeper:zookeeper" + "org.jruby.joni:joni" + ] + }, + "org.jruby.joni:joni": { + "locked": "2.1.27", + "transitive": [ + "net.thisptr:jackson-jq" ] }, "org.ow2.asm:asm": { @@ -420,20 +378,36 @@ "transitive": [ "com.jayway.jsonpath:json-path", "com.netflix.spectator:spectator-api", - "org.apache.curator:curator-client", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.zookeeper:zookeeper" + "org.apache.logging.log4j:log4j-slf4j-impl" ] } }, "testCompileClasspath": { - "com.google.guava:guava": { - "locked": "14.0.1", + "com.fasterxml.jackson.core:jackson-annotations": { + "locked": "2.11.4", "transitive": [ - "org.apache.curator:curator-client", - "org.apache.curator:curator-framework", - "org.apache.curator:curator-recipes", - "org.apache.curator:curator-test" + 
"com.fasterxml.jackson.core:jackson-databind" + ] + }, + "com.fasterxml.jackson.core:jackson-core": { + "locked": "2.11.4", + "transitive": [ + "com.fasterxml.jackson.core:jackson-databind" + ] + }, + "com.fasterxml.jackson.core:jackson-databind": { + "locked": "2.11.4", + "transitive": [ + "net.thisptr:jackson-jq" + ] + }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8" + }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" ] }, "com.jayway.jsonpath:json-path": { @@ -442,6 +416,9 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "com.netflix.conductor:conductor-common": { + "project": true + }, "com.netflix.conductor:conductor-core": { "project": true }, @@ -469,36 +446,12 @@ "org.springframework.boot:spring-boot-starter-test" ] }, - "javax.activation:activation": { - "locked": "1.1", - "transitive": [ - "javax.mail:mail" - ] - }, - "javax.mail:mail": { - "locked": "1.4", - "transitive": [ - "log4j:log4j" - ] - }, - "jline:jline": { - "locked": "0.9.94", - "transitive": [ - "org.apache.zookeeper:zookeeper" - ] - }, "junit:junit": { "locked": "4.13.2", "transitive": [ "org.junit.vintage:junit-vintage-engine" ] }, - "log4j:log4j": { - "locked": "1.2.15", - "transitive": [ - "org.apache.zookeeper:zookeeper" - ] - }, "net.bytebuddy:byte-buddy": { "locked": "1.10.22", "transitive": [ @@ -523,32 +476,8 @@ "com.jayway.jsonpath:json-path" ] }, - "org.apache.commons:commons-lang3": { - "locked": "3.10" - }, - "org.apache.commons:commons-math": { - "locked": "2.2", - "transitive": [ - "org.apache.curator:curator-test" - ] - }, - "org.apache.curator:curator-client": { - "locked": "2.4.0", - "transitive": [ - "org.apache.curator:curator-framework" - ] - }, - "org.apache.curator:curator-framework": { - "locked": "2.4.0", - "transitive": [ - "org.apache.curator:curator-recipes" - ] - }, - "org.apache.curator:curator-recipes": { - "locked": "2.4.0" - }, - "org.apache.curator:curator-test": { - "locked": "2.4.0" + "net.thisptr:jackson-jq": { + "locked": "0.0.13" }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", @@ -581,15 +510,6 @@ "org.apache.logging.log4j:log4j-web": { "locked": "2.17.1" }, - "org.apache.zookeeper:zookeeper": { - "locked": "3.4.5", - "transitive": [ - "org.apache.curator:curator-client", - "org.apache.curator:curator-framework", - "org.apache.curator:curator-recipes", - "org.apache.curator:curator-test" - ] - }, "org.apiguardian:apiguardian-api": { "locked": "1.1.0", "transitive": [ @@ -606,22 +526,28 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "org.hamcrest:hamcrest": { "locked": "2.2", "transitive": [ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.javassist:javassist": { - "locked": "3.15.0-GA", + "org.jruby.jcodings:jcodings": { + "locked": "1.0.43", "transitive": [ - "org.apache.curator:curator-test" + "org.jruby.joni:joni" ] }, - "org.jboss.netty:netty": { - "locked": "3.2.2.Final", + "org.jruby.joni:joni": { + "locked": "2.1.27", "transitive": [ - "org.apache.zookeeper:zookeeper" + "net.thisptr:jackson-jq" ] }, "org.junit.jupiter:junit-jupiter": { @@ -722,9 +648,7 @@ "locked": "1.7.30", "transitive": [ "com.jayway.jsonpath:json-path", - "org.apache.curator:curator-client", "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.zookeeper:zookeeper", "org.slf4j:jul-to-slf4j" ] }, @@ -858,16 
+782,20 @@ "locked": "2.11.4", "transitive": [ "com.netflix.conductor:conductor-common", + "com.netflix.conductor:conductor-core", + "net.thisptr:jackson-jq" + ] + }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ "com.netflix.conductor:conductor-core" ] }, - "com.google.guava:guava": { - "locked": "14.0.1", + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", "transitive": [ - "org.apache.curator:curator-client", - "org.apache.curator:curator-framework", - "org.apache.curator:curator-recipes", - "org.apache.curator:curator-test" + "com.github.ben-manes.caffeine:caffeine" ] }, "com.google.protobuf:protobuf-java": { @@ -949,36 +877,12 @@ "org.springframework.boot:spring-boot-starter-test" ] }, - "javax.activation:activation": { - "locked": "1.1", - "transitive": [ - "javax.mail:mail" - ] - }, - "javax.mail:mail": { - "locked": "1.4", - "transitive": [ - "log4j:log4j" - ] - }, - "jline:jline": { - "locked": "0.9.94", - "transitive": [ - "org.apache.zookeeper:zookeeper" - ] - }, "junit:junit": { "locked": "4.13.2", "transitive": [ "org.junit.vintage:junit-vintage-engine" ] }, - "log4j:log4j": { - "locked": "1.2.15", - "transitive": [ - "org.apache.zookeeper:zookeeper" - ] - }, "net.bytebuddy:byte-buddy": { "locked": "1.10.22", "transitive": [ @@ -1003,6 +907,9 @@ "com.jayway.jsonpath:json-path" ] }, + "net.thisptr:jackson-jq": { + "locked": "0.0.13" + }, "org.apache.bval:bval-jsr": { "locked": "2.0.5", "transitive": [ @@ -1017,30 +924,6 @@ "com.netflix.conductor:conductor-core" ] }, - "org.apache.commons:commons-math": { - "locked": "2.2", - "transitive": [ - "org.apache.curator:curator-test" - ] - }, - "org.apache.curator:curator-client": { - "locked": "2.4.0", - "transitive": [ - "org.apache.curator:curator-framework" - ] - }, - "org.apache.curator:curator-framework": { - "locked": "2.4.0", - "transitive": [ - "org.apache.curator:curator-recipes" - ] - }, - "org.apache.curator:curator-recipes": { - "locked": "2.4.0" - }, - "org.apache.curator:curator-test": { - "locked": "2.4.0" - }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ @@ -1090,15 +973,6 @@ "com.netflix.conductor:conductor-core" ] }, - "org.apache.zookeeper:zookeeper": { - "locked": "3.4.5", - "transitive": [ - "org.apache.curator:curator-client", - "org.apache.curator:curator-framework", - "org.apache.curator:curator-recipes", - "org.apache.curator:curator-test" - ] - }, "org.apiguardian:apiguardian-api": { "locked": "1.1.0", "transitive": [ @@ -1116,22 +990,28 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "org.hamcrest:hamcrest": { "locked": "2.2", "transitive": [ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.javassist:javassist": { - "locked": "3.15.0-GA", + "org.jruby.jcodings:jcodings": { + "locked": "1.0.43", "transitive": [ - "org.apache.curator:curator-test" + "org.jruby.joni:joni" ] }, - "org.jboss.netty:netty": { - "locked": "3.2.2.Final", + "org.jruby.joni:joni": { + "locked": "2.1.27", "transitive": [ - "org.apache.zookeeper:zookeeper" + "net.thisptr:jackson-jq" ] }, "org.junit.jupiter:junit-jupiter": { @@ -1243,9 +1123,7 @@ "transitive": [ "com.jayway.jsonpath:json-path", "com.netflix.spectator:spectator-api", - "org.apache.curator:curator-client", "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.zookeeper:zookeeper", "org.slf4j:jul-to-slf4j" ] }, diff --git 
a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/json/JsonJqTransform.java b/json-jq-task/src/main/java/com/netflix/conductor/tasks/json/JsonJqTransform.java similarity index 88% rename from contribs/src/main/java/com/netflix/conductor/contribs/tasks/json/JsonJqTransform.java rename to json-jq-task/src/main/java/com/netflix/conductor/tasks/json/JsonJqTransform.java index 78d47afd6b..ed49c40d2e 100644 --- a/contribs/src/main/java/com/netflix/conductor/contribs/tasks/json/JsonJqTransform.java +++ b/json-jq-task/src/main/java/com/netflix/conductor/tasks/json/JsonJqTransform.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ -package com.netflix.conductor.contribs.tasks.json; +package com.netflix.conductor.tasks.json; import java.util.ArrayList; import java.util.List; @@ -29,12 +29,11 @@ import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.CacheLoader; -import com.google.common.cache.LoadingCache; +import com.github.benmanes.caffeine.cache.CacheLoader; +import com.github.benmanes.caffeine.cache.Caffeine; +import com.github.benmanes.caffeine.cache.LoadingCache; import net.thisptr.jackson.jq.JsonQuery; import net.thisptr.jackson.jq.Scope; -import net.thisptr.jackson.jq.exception.JsonQueryException; @Component(JsonJqTransform.NAME) public class JsonJqTransform extends WorkflowSystemTask { @@ -103,14 +102,8 @@ public void start(WorkflowModel workflow, TaskModel task, WorkflowExecutor execu } private LoadingCache createQueryCache() { - final CacheLoader loader = - new CacheLoader<>() { - @Override - public JsonQuery load(String query) throws JsonQueryException { - return JsonQuery.compile(query); - } - }; - return CacheBuilder.newBuilder() + final CacheLoader loader = JsonQuery::compile; + return Caffeine.newBuilder() .expireAfterWrite(1, TimeUnit.HOURS) .maximumSize(1000) .build(loader); diff --git a/contribs/src/test/java/com/netflix/conductor/contribs/tasks/json/JsonJqTransformTest.java b/json-jq-task/src/test/java/com/netflix/conductor/tasks/json/JsonJqTransformTest.java similarity index 95% rename from contribs/src/test/java/com/netflix/conductor/contribs/tasks/json/JsonJqTransformTest.java rename to json-jq-task/src/test/java/com/netflix/conductor/tasks/json/JsonJqTransformTest.java index 553130ab8f..78db2cca22 100644 --- a/contribs/src/test/java/com/netflix/conductor/contribs/tasks/json/JsonJqTransformTest.java +++ b/json-jq-task/src/test/java/com/netflix/conductor/tasks/json/JsonJqTransformTest.java @@ -10,7 +10,7 @@ * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. 
*/ -package com.netflix.conductor.contribs.tasks.json; +package com.netflix.conductor.tasks.json; import java.util.Collections; import java.util.HashMap; @@ -24,9 +24,7 @@ import com.fasterxml.jackson.databind.ObjectMapper; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.Assert.*; public class JsonJqTransformTest { diff --git a/mysql-persistence/build.gradle b/mysql-persistence/build.gradle deleted file mode 100644 index 081da89612..0000000000 --- a/mysql-persistence/build.gradle +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2021 Netflix, Inc. - *
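An illustrative aside on the pattern the JsonJqTransform hunk above lands on: the json-jq-task module now caches compiled jq expressions in a Caffeine LoadingCache (pulled in by the new build.gradle) instead of Guava's CacheBuilder. A minimal sketch, assuming jackson-jq 0.0.13 and the cache settings from the diff; the class and method names here are hypothetical, not part of the patch:

import java.util.List;
import java.util.concurrent.TimeUnit;

import com.fasterxml.jackson.databind.JsonNode;
import com.github.benmanes.caffeine.cache.Caffeine;
import com.github.benmanes.caffeine.cache.LoadingCache;

import net.thisptr.jackson.jq.JsonQuery;
import net.thisptr.jackson.jq.Scope;
import net.thisptr.jackson.jq.exception.JsonQueryException;

public class JqQueryCacheSketch { // hypothetical name

    // Caffeine's CacheLoader.load is allowed to throw, so JsonQuery::compile
    // (which throws JsonQueryException) can be used as the loader directly,
    // with no anonymous CacheLoader subclass as was needed with Guava.
    private final LoadingCache<String, JsonQuery> queryCache =
            Caffeine.newBuilder()
                    .expireAfterWrite(1, TimeUnit.HOURS)
                    .maximumSize(1000)
                    .build(JsonQuery::compile);

    public List<JsonNode> evaluate(String expression, JsonNode input) throws JsonQueryException {
        // Repeated tasks with the same jq expression skip recompilation.
        JsonQuery query = queryCache.get(expression);
        return query.apply(Scope.newEmptyScope(), input);
    }
}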

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -dependencies { - implementation project(':conductor-common') - implementation project(':conductor-core') - compileOnly 'org.springframework.boot:spring-boot-starter' - compileOnly 'org.springframework.retry:spring-retry' - - // SBMTODO: remove guava dep - implementation "com.google.guava:guava:${revGuava}" - - implementation "com.fasterxml.jackson.core:jackson-databind" - implementation "com.fasterxml.jackson.core:jackson-core" - - implementation "org.apache.commons:commons-lang3" - - implementation "mysql:mysql-connector-java" - implementation "org.springframework.boot:spring-boot-starter-jdbc" - implementation "org.flywaydb:flyway-core" - - testImplementation 'org.springframework.retry:spring-retry' - testImplementation "org.testcontainers:mysql:${revTestContainer}" - - testImplementation project(':conductor-core').sourceSets.test.output - testImplementation project(':conductor-common').sourceSets.test.output -} - -test { - //the MySQL unit tests must run within the same JVM to share the same embedded DB - maxParallelForks = 1 -} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLConfiguration.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLConfiguration.java deleted file mode 100644 index e45fc3b2bb..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLConfiguration.java +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.config; - -import java.sql.SQLException; -import java.util.Optional; - -import javax.sql.DataSource; - -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.DependsOn; -import org.springframework.context.annotation.Import; -import org.springframework.retry.RetryContext; -import org.springframework.retry.backoff.NoBackOffPolicy; -import org.springframework.retry.policy.SimpleRetryPolicy; -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.mysql.dao.MySQLExecutionDAO; -import com.netflix.conductor.mysql.dao.MySQLMetadataDAO; -import com.netflix.conductor.mysql.dao.MySQLQueueDAO; - -import com.fasterxml.jackson.databind.ObjectMapper; - -import static com.mysql.cj.exceptions.MysqlErrorNumbers.ER_LOCK_DEADLOCK; - -@Configuration(proxyBeanMethods = false) -@EnableConfigurationProperties(MySQLProperties.class) -@ConditionalOnProperty(name = "conductor.db.type", havingValue = "mysql") -// Import the DataSourceAutoConfiguration when mysql database is selected. -// By default the datasource configuration is excluded in the main module. 
-@Import(DataSourceAutoConfiguration.class) -public class MySQLConfiguration { - - @Bean - @DependsOn({"flyway", "flywayInitializer"}) - public MySQLMetadataDAO mySqlMetadataDAO( - @Qualifier("mysqlRetryTemplate") RetryTemplate retryTemplate, - ObjectMapper objectMapper, - DataSource dataSource, - MySQLProperties properties) { - return new MySQLMetadataDAO(retryTemplate, objectMapper, dataSource, properties); - } - - @Bean - @DependsOn({"flyway", "flywayInitializer"}) - public MySQLExecutionDAO mySqlExecutionDAO( - @Qualifier("mysqlRetryTemplate") RetryTemplate retryTemplate, - ObjectMapper objectMapper, - DataSource dataSource) { - return new MySQLExecutionDAO(retryTemplate, objectMapper, dataSource); - } - - @Bean - @DependsOn({"flyway", "flywayInitializer"}) - public MySQLQueueDAO mySqlQueueDAO( - @Qualifier("mysqlRetryTemplate") RetryTemplate retryTemplate, - ObjectMapper objectMapper, - DataSource dataSource) { - return new MySQLQueueDAO(retryTemplate, objectMapper, dataSource); - } - - @Bean - public RetryTemplate mysqlRetryTemplate(MySQLProperties properties) { - SimpleRetryPolicy retryPolicy = new CustomRetryPolicy(); - retryPolicy.setMaxAttempts(properties.getDeadlockRetryMax()); - - RetryTemplate retryTemplate = new RetryTemplate(); - retryTemplate.setRetryPolicy(retryPolicy); - retryTemplate.setBackOffPolicy(new NoBackOffPolicy()); - return retryTemplate; - } - - public static class CustomRetryPolicy extends SimpleRetryPolicy { - - @Override - public boolean canRetry(final RetryContext context) { - final Optional lastThrowable = - Optional.ofNullable(context.getLastThrowable()); - return lastThrowable - .map(throwable -> super.canRetry(context) && isDeadLockError(throwable)) - .orElseGet(() -> super.canRetry(context)); - } - - private boolean isDeadLockError(Throwable throwable) { - SQLException sqlException = findCauseSQLException(throwable); - if (sqlException == null) { - return false; - } - return ER_LOCK_DEADLOCK == sqlException.getErrorCode(); - } - - private SQLException findCauseSQLException(Throwable throwable) { - Throwable causeException = throwable; - while (null != causeException && !(causeException instanceof SQLException)) { - causeException = causeException.getCause(); - } - return (SQLException) causeException; - } - } -} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLProperties.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLProperties.java deleted file mode 100644 index 42f9f74a3a..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/config/MySQLProperties.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
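A brief aside on the retry wiring removed above: mysqlRetryTemplate pairs a SimpleRetryPolicy subclass, whose canRetry additionally requires a MySQL deadlock (SQLException error code 1213, ER_LOCK_DEADLOCK) in the cause chain, with a NoBackOffPolicy. A stand-alone sketch of that shape; the factory method and class names are hypothetical:

import org.springframework.retry.backoff.NoBackOffPolicy;
import org.springframework.retry.policy.SimpleRetryPolicy;
import org.springframework.retry.support.RetryTemplate;

public final class DeadlockRetrySketch { // hypothetical name

    public static RetryTemplate deadlockRetryTemplate(int maxAttempts) {
        // The removed module uses a SimpleRetryPolicy subclass that also
        // walks the cause chain looking for ER_LOCK_DEADLOCK; a plain policy
        // is shown here to keep the sketch self-contained.
        SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy();
        retryPolicy.setMaxAttempts(maxAttempts);

        RetryTemplate template = new RetryTemplate();
        template.setRetryPolicy(retryPolicy);
        template.setBackOffPolicy(new NoBackOffPolicy()); // retry immediately
        return template;
    }
}

Callers then wrap each transactional unit, for example template.execute(ctx -> getWithTransaction(fn)), so a deadlocked transaction is simply re-run from the start rather than failing the request.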

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.config; - -import java.time.Duration; - -import org.springframework.boot.context.properties.ConfigurationProperties; - -@ConfigurationProperties("conductor.mysql") -public class MySQLProperties { - - /** The time (in seconds) after which the in-memory task definitions cache will be refreshed */ - private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60); - - private Integer deadlockRetryMax = 3; - - public Duration getTaskDefCacheRefreshInterval() { - return taskDefCacheRefreshInterval; - } - - public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) { - this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval; - } - - public Integer getDeadlockRetryMax() { - return deadlockRetryMax; - } - - public void setDeadlockRetryMax(Integer deadlockRetryMax) { - this.deadlockRetryMax = deadlockRetryMax; - } -} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLBaseDAO.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLBaseDAO.java deleted file mode 100644 index d48b14ed2d..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLBaseDAO.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
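For reference, MySQLProperties binds under the conductor.mysql prefix, so before this removal the module was tuned from Spring Boot configuration. A properties sketch using the defaults visible in the class (relaxed binding also accepts kebab-case keys):

conductor.db.type=mysql
conductor.mysql.taskDefCacheRefreshInterval=60s
conductor.mysql.deadlockRetryMax=3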

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.dao; - -import java.io.IOException; -import java.sql.Connection; -import java.sql.SQLException; -import java.time.Duration; -import java.time.Instant; -import java.util.Arrays; -import java.util.List; -import java.util.function.Consumer; - -import javax.sql.DataSource; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.mysql.util.*; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableList; - -import static com.netflix.conductor.core.exception.ApplicationException.Code.*; - -public abstract class MySQLBaseDAO { - - private static final List EXCLUDED_STACKTRACE_CLASS = - ImmutableList.of(MySQLBaseDAO.class.getName(), Thread.class.getName()); - - protected final Logger logger = LoggerFactory.getLogger(getClass()); - protected final ObjectMapper objectMapper; - protected final DataSource dataSource; - - private final RetryTemplate retryTemplate; - - protected MySQLBaseDAO( - RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) { - this.retryTemplate = retryTemplate; - this.objectMapper = objectMapper; - this.dataSource = dataSource; - } - - protected final LazyToString getCallingMethod() { - return new LazyToString( - () -> - Arrays.stream(Thread.currentThread().getStackTrace()) - .filter( - ste -> - !EXCLUDED_STACKTRACE_CLASS.contains( - ste.getClassName())) - .findFirst() - .map(StackTraceElement::getMethodName) - .orElseThrow(() -> new NullPointerException("Cannot find Caller"))); - } - - protected String toJson(Object value) { - try { - return objectMapper.writeValueAsString(value); - } catch (JsonProcessingException ex) { - throw new ApplicationException(INTERNAL_ERROR, ex); - } - } - - protected T readValue(String json, Class tClass) { - try { - return objectMapper.readValue(json, tClass); - } catch (IOException ex) { - throw new ApplicationException(INTERNAL_ERROR, ex); - } - } - - protected T readValue(String json, TypeReference typeReference) { - try { - return objectMapper.readValue(json, typeReference); - } catch (IOException ex) { - throw new ApplicationException(INTERNAL_ERROR, ex); - } - } - - /** - * Initialize a new transactional {@link Connection} from {@link #dataSource} and pass it to - * {@literal function}. - * - *

Successful executions of {@literal function} result in a commit, and the value returned by {@link - * TransactionalFunction#apply(Connection)} is passed back to the caller. - *

Any {@link Throwable} thrown from {@code TransactionalFunction#apply(Connection)} will - * result in a rollback of the transaction and will be wrapped in an {@link - * ApplicationException} if it is not already one. - *

Generally this is used to wrap multiple {@link #execute(Connection, String, - * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that - * produce some expected return value. - * - * @param function The function to apply with a new transactional {@link Connection} - * @param The return type. - * @return The result of {@code TransactionalFunction#apply(Connection)} - * @throws ApplicationException If any errors occur. - */ - private R getWithTransaction(final TransactionalFunction function) { - final Instant start = Instant.now(); - LazyToString callingMethod = getCallingMethod(); - logger.trace("{} : starting transaction", callingMethod); - - try (Connection tx = dataSource.getConnection()) { - boolean previousAutoCommitMode = tx.getAutoCommit(); - tx.setAutoCommit(false); - try { - R result = function.apply(tx); - tx.commit(); - return result; - } catch (Throwable th) { - tx.rollback(); - if (th instanceof ApplicationException) { - throw th; - } - throw new ApplicationException(BACKEND_ERROR, th.getMessage(), th); - } finally { - tx.setAutoCommit(previousAutoCommitMode); - } - } catch (SQLException ex) { - throw new ApplicationException(BACKEND_ERROR, ex.getMessage(), ex); - } finally { - logger.trace( - "{} : took {}ms", - callingMethod, - Duration.between(start, Instant.now()).toMillis()); - } - } - - R getWithRetriedTransactions(final TransactionalFunction function) { - try { - return retryTemplate.execute(context -> getWithTransaction(function)); - } catch (Exception e) { - throw (ApplicationException) e; - } - } - - protected R getWithTransactionWithOutErrorPropagation(TransactionalFunction function) { - Instant start = Instant.now(); - LazyToString callingMethod = getCallingMethod(); - logger.trace("{} : starting transaction", callingMethod); - - try (Connection tx = dataSource.getConnection()) { - boolean previousAutoCommitMode = tx.getAutoCommit(); - tx.setAutoCommit(false); - try { - R result = function.apply(tx); - tx.commit(); - return result; - } catch (Throwable th) { - tx.rollback(); - logger.info(CONFLICT + " " + th.getMessage()); - return null; - } finally { - tx.setAutoCommit(previousAutoCommitMode); - } - } catch (SQLException ex) { - throw new ApplicationException(BACKEND_ERROR, ex.getMessage(), ex); - } finally { - logger.trace( - "{} : took {}ms", - callingMethod, - Duration.between(start, Instant.now()).toMillis()); - } - } - - /** - * Wraps {@link #getWithRetriedTransactions(TransactionalFunction)} with no return value. - * - *

Generally this is used to wrap multiple {@link #execute(Connection, String, - * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that - * produce no expected return value. - * - * @param consumer The {@link Consumer} callback to pass a transactional {@link Connection} to. - * @throws ApplicationException If any errors occur. - * @see #getWithRetriedTransactions(TransactionalFunction) - */ - protected void withTransaction(Consumer consumer) { - getWithRetriedTransactions( - connection -> { - consumer.accept(connection); - return null; - }); - } - - /** - * Initiate a new transaction and execute a {@link Query} within that context, then return the - * results of {@literal function}. - * - * @param query The query string to prepare. - * @param function The functional callback to pass a {@link Query} to. - * @param The expected return type of {@literal function}. - * @return The results of applying {@literal function}. - */ - protected R queryWithTransaction(String query, QueryFunction function) { - return getWithRetriedTransactions(tx -> query(tx, query, function)); - } - - /** - * Execute a {@link Query} within the context of a given transaction and return the results of - * {@literal function}. - * - * @param tx The transactional {@link Connection} to use. - * @param query The query string to prepare. - * @param function The functional callback to pass a {@link Query} to. - * @param The expected return type of {@literal function}. - * @return The results of applying {@literal function}. - */ - protected R query(Connection tx, String query, QueryFunction function) { - try (Query q = new Query(objectMapper, tx, query)) { - return function.apply(q); - } catch (SQLException ex) { - throw new ApplicationException(BACKEND_ERROR, ex); - } - } - - /** - * Execute a statement with no expected return value within a given transaction. - * - * @param tx The transactional {@link Connection} to use. - * @param query The query string to prepare. - * @param function The functional callback to pass a {@link Query} to. - */ - protected void execute(Connection tx, String query, ExecuteFunction function) { - try (Query q = new Query(objectMapper, tx, query)) { - function.apply(q); - } catch (SQLException ex) { - throw new ApplicationException(BACKEND_ERROR, ex); - } - } - - /** - * Instantiates a new transactional connection and invokes {@link #execute(Connection, String, - * ExecuteFunction)} - * - * @param query The query string to prepare. - * @param function The functional callback to pass a {@link Query} to. - */ - protected void executeWithTransaction(String query, ExecuteFunction function) { - withTransaction(tx -> execute(tx, query, function)); - } -} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAO.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAO.java deleted file mode 100644 index d4f0c54c4a..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAO.java +++ /dev/null @@ -1,1071 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
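To make the transaction javadoc above concrete: subclasses compose these helpers rather than touching JDBC directly. The method below is hypothetical, but the SQL and the Query builder calls mirror updateInProgressStatus later in this patch:

// Hypothetical method on a MySQLBaseDAO subclass. The statement runs inside
// one transaction via the helpers above and, through
// getWithRetriedTransactions, is retried as a whole on deadlock.
public void markInProgress(String taskDefName, String taskId, boolean inProgress) {
    executeWithTransaction(
            "UPDATE task_in_progress SET in_progress_status = ?, modified_on = CURRENT_TIMESTAMP "
                    + "WHERE task_def_name = ? AND task_id = ?",
            q ->
                    q.addParameter(inProgress)
                            .addParameter(taskDefName)
                            .addParameter(taskId)
                            .executeUpdate());
}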

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.dao; - -import java.sql.Connection; -import java.sql.SQLException; -import java.text.SimpleDateFormat; -import java.util.*; -import java.util.stream.Collectors; - -import javax.sql.DataSource; - -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.common.metadata.events.EventExecution; -import com.netflix.conductor.common.metadata.tasks.PollData; -import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO; -import com.netflix.conductor.dao.ExecutionDAO; -import com.netflix.conductor.dao.PollDataDAO; -import com.netflix.conductor.dao.RateLimitingDAO; -import com.netflix.conductor.metrics.Monitors; -import com.netflix.conductor.model.TaskModel; -import com.netflix.conductor.model.WorkflowModel; -import com.netflix.conductor.mysql.util.Query; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; - -import static com.netflix.conductor.core.exception.ApplicationException.Code.BACKEND_ERROR; - -public class MySQLExecutionDAO extends MySQLBaseDAO - implements ExecutionDAO, RateLimitingDAO, PollDataDAO, ConcurrentExecutionLimitDAO { - - public MySQLExecutionDAO( - RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) { - super(retryTemplate, objectMapper, dataSource); - } - - private static String dateStr(Long timeInMs) { - Date date = new Date(timeInMs); - return dateStr(date); - } - - private static String dateStr(Date date) { - SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd"); - return format.format(date); - } - - @Override - public List getPendingTasksByWorkflow(String taskDefName, String workflowId) { - // @formatter:off - String GET_IN_PROGRESS_TASKS_FOR_WORKFLOW = - "SELECT json_data FROM task_in_progress tip " - + "INNER JOIN task t ON t.task_id = tip.task_id " - + "WHERE task_def_name = ? 
AND workflow_id = ?"; - // @formatter:on - - return queryWithTransaction( - GET_IN_PROGRESS_TASKS_FOR_WORKFLOW, - q -> - q.addParameter(taskDefName) - .addParameter(workflowId) - .executeAndFetch(TaskModel.class)); - } - - @Override - public List getTasks(String taskDefName, String startKey, int count) { - List tasks = new ArrayList<>(count); - - List pendingTasks = getPendingTasksForTaskType(taskDefName); - boolean startKeyFound = startKey == null; - int found = 0; - for (TaskModel pendingTask : pendingTasks) { - if (!startKeyFound) { - if (pendingTask.getTaskId().equals(startKey)) { - startKeyFound = true; - // noinspection ConstantConditions - if (startKey != null) { - continue; - } - } - } - if (startKeyFound && found < count) { - tasks.add(pendingTask); - found++; - } - } - - return tasks; - } - - private static String taskKey(TaskModel task) { - return task.getReferenceTaskName() + "_" + task.getRetryCount(); - } - - @Override - public List createTasks(List tasks) { - List created = Lists.newArrayListWithCapacity(tasks.size()); - - withTransaction( - connection -> { - for (TaskModel task : tasks) { - validate(task); - - task.setScheduledTime(System.currentTimeMillis()); - - final String taskKey = taskKey(task); - - boolean scheduledTaskAdded = addScheduledTask(connection, task, taskKey); - - if (!scheduledTaskAdded) { - logger.trace( - "Task already scheduled, skipping the run " - + task.getTaskId() - + ", ref=" - + task.getReferenceTaskName() - + ", key=" - + taskKey); - continue; - } - - insertOrUpdateTaskData(connection, task); - addWorkflowToTaskMapping(connection, task); - addTaskInProgress(connection, task); - updateTask(connection, task); - - created.add(task); - } - }); - - return created; - } - - @Override - public void updateTask(TaskModel task) { - withTransaction(connection -> updateTask(connection, task)); - } - - /** - * This is a dummy implementation and this feature is not for Mysql backed Conductor - * - * @param task: which needs to be evaluated whether it is rateLimited or not - */ - @Override - public boolean exceedsRateLimitPerFrequency(TaskModel task, TaskDef taskDef) { - return false; - } - - @Override - public boolean exceedsLimit(TaskModel task) { - - Optional taskDefinition = task.getTaskDefinition(); - if (taskDefinition.isEmpty()) { - return false; - } - - TaskDef taskDef = taskDefinition.get(); - - int limit = taskDef.concurrencyLimit(); - if (limit <= 0) { - return false; - } - - long current = getInProgressTaskCount(task.getTaskDefName()); - - if (current >= limit) { - Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit); - return true; - } - - logger.info( - "Task execution count for {}: limit={}, current={}", - task.getTaskDefName(), - limit, - getInProgressTaskCount(task.getTaskDefName())); - - String taskId = task.getTaskId(); - - List tasksInProgressInOrderOfArrival = - findAllTasksInProgressInOrderOfArrival(task, limit); - - boolean rateLimited = !tasksInProgressInOrderOfArrival.contains(taskId); - - if (rateLimited) { - logger.info( - "Task execution count limited. 
{}, limit {}, current {}", - task.getTaskDefName(), - limit, - getInProgressTaskCount(task.getTaskDefName())); - Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit); - } - - return rateLimited; - } - - @Override - public boolean removeTask(String taskId) { - TaskModel task = getTask(taskId); - - if (task == null) { - logger.warn("No such task found by id {}", taskId); - return false; - } - - final String taskKey = taskKey(task); - - withTransaction( - connection -> { - removeScheduledTask(connection, task, taskKey); - removeWorkflowToTaskMapping(connection, task); - removeTaskInProgress(connection, task); - removeTaskData(connection, task); - }); - return true; - } - - @Override - public TaskModel getTask(String taskId) { - String GET_TASK = "SELECT json_data FROM task WHERE task_id = ?"; - return queryWithTransaction( - GET_TASK, q -> q.addParameter(taskId).executeAndFetchFirst(TaskModel.class)); - } - - @Override - public List getTasks(List taskIds) { - if (taskIds.isEmpty()) { - return Lists.newArrayList(); - } - return getWithRetriedTransactions(c -> getTasks(c, taskIds)); - } - - @Override - public List getPendingTasksForTaskType(String taskName) { - Preconditions.checkNotNull(taskName, "task name cannot be null"); - // @formatter:off - String GET_IN_PROGRESS_TASKS_FOR_TYPE = - "SELECT json_data FROM task_in_progress tip " - + "INNER JOIN task t ON t.task_id = tip.task_id " - + "WHERE task_def_name = ?"; - // @formatter:on - - return queryWithTransaction( - GET_IN_PROGRESS_TASKS_FOR_TYPE, - q -> q.addParameter(taskName).executeAndFetch(TaskModel.class)); - } - - @Override - public List getTasksForWorkflow(String workflowId) { - String GET_TASKS_FOR_WORKFLOW = - "SELECT task_id FROM workflow_to_task WHERE workflow_id = ?"; - return getWithRetriedTransactions( - tx -> - query( - tx, - GET_TASKS_FOR_WORKFLOW, - q -> { - List taskIds = - q.addParameter(workflowId) - .executeScalarList(String.class); - return getTasks(tx, taskIds); - })); - } - - @Override - public String createWorkflow(WorkflowModel workflow) { - return insertOrUpdateWorkflow(workflow, false); - } - - @Override - public String updateWorkflow(WorkflowModel workflow) { - return insertOrUpdateWorkflow(workflow, true); - } - - @Override - public boolean removeWorkflow(String workflowId) { - boolean removed = false; - WorkflowModel workflow = getWorkflow(workflowId, true); - if (workflow != null) { - withTransaction( - connection -> { - removeWorkflowDefToWorkflowMapping(connection, workflow); - removeWorkflow(connection, workflowId); - removePendingWorkflow(connection, workflow.getWorkflowName(), workflowId); - }); - removed = true; - - for (TaskModel task : workflow.getTasks()) { - if (!removeTask(task.getTaskId())) { - removed = false; - } - } - } - return removed; - } - - /** - * This is a dummy implementation and this feature is not supported for MySQL backed Conductor - */ - @Override - public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) { - throw new UnsupportedOperationException( - "This method is not implemented in MySQLExecutionDAO. 
Please use RedisDAO mode instead for using TTLs."); - } - - @Override - public void removeFromPendingWorkflow(String workflowType, String workflowId) { - withTransaction(connection -> removePendingWorkflow(connection, workflowType, workflowId)); - } - - @Override - public WorkflowModel getWorkflow(String workflowId) { - return getWorkflow(workflowId, true); - } - - @Override - public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) { - WorkflowModel workflow = getWithRetriedTransactions(tx -> readWorkflow(tx, workflowId)); - - if (workflow != null) { - if (includeTasks) { - List tasks = getTasksForWorkflow(workflowId); - tasks.sort(Comparator.comparingInt(TaskModel::getSeq)); - workflow.setTasks(tasks); - } - } - return workflow; - } - - /** - * @param workflowName name of the workflow - * @param version the workflow version - * @return list of workflow ids that are in RUNNING state returns workflows of all versions - * for the given workflow name - */ - @Override - public List getRunningWorkflowIds(String workflowName, int version) { - Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); - String GET_PENDING_WORKFLOW_IDS = - "SELECT workflow_id FROM workflow_pending WHERE workflow_type = ?"; - - return queryWithTransaction( - GET_PENDING_WORKFLOW_IDS, - q -> q.addParameter(workflowName).executeScalarList(String.class)); - } - - /** - * @param workflowName Name of the workflow - * @param version the workflow version - * @return list of workflows that are in RUNNING state - */ - @Override - public List getPendingWorkflowsByType(String workflowName, int version) { - Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); - return getRunningWorkflowIds(workflowName, version).stream() - .map(this::getWorkflow) - .filter(workflow -> workflow.getWorkflowVersion() == version) - .collect(Collectors.toList()); - } - - @Override - public long getPendingWorkflowCount(String workflowName) { - Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); - String GET_PENDING_WORKFLOW_COUNT = - "SELECT COUNT(*) FROM workflow_pending WHERE workflow_type = ?"; - - return queryWithTransaction( - GET_PENDING_WORKFLOW_COUNT, q -> q.addParameter(workflowName).executeCount()); - } - - @Override - public long getInProgressTaskCount(String taskDefName) { - String GET_IN_PROGRESS_TASK_COUNT = - "SELECT COUNT(*) FROM task_in_progress WHERE task_def_name = ? AND in_progress_status = true"; - - return queryWithTransaction( - GET_IN_PROGRESS_TASK_COUNT, q -> q.addParameter(taskDefName).executeCount()); - } - - @Override - public List getWorkflowsByType( - String workflowName, Long startTime, Long endTime) { - Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); - Preconditions.checkNotNull(startTime, "startTime cannot be null"); - Preconditions.checkNotNull(endTime, "endTime cannot be null"); - - List workflows = new LinkedList<>(); - - withTransaction( - tx -> { - // @formatter:off - String GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF = - "SELECT workflow_id FROM workflow_def_to_workflow " - + "WHERE workflow_def = ? AND date_str BETWEEN ? 
AND ?"; - // @formatter:on - - List workflowIds = - query( - tx, - GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF, - q -> - q.addParameter(workflowName) - .addParameter(dateStr(startTime)) - .addParameter(dateStr(endTime)) - .executeScalarList(String.class)); - workflowIds.forEach( - workflowId -> { - try { - WorkflowModel wf = getWorkflow(workflowId); - if (wf.getCreateTime() >= startTime - && wf.getCreateTime() <= endTime) { - workflows.add(wf); - } - } catch (Exception e) { - logger.error( - "Unable to load workflow id {} with name {}", - workflowId, - workflowName, - e); - } - }); - }); - - return workflows; - } - - @Override - public List getWorkflowsByCorrelationId( - String workflowName, String correlationId, boolean includeTasks) { - Preconditions.checkNotNull(correlationId, "correlationId cannot be null"); - String GET_WORKFLOWS_BY_CORRELATION_ID = - "SELECT w.json_data FROM workflow w left join workflow_def_to_workflow wd on w.workflow_id = wd.workflow_id WHERE w.correlation_id = ? and wd.workflow_def = ?"; - - return queryWithTransaction( - GET_WORKFLOWS_BY_CORRELATION_ID, - q -> - q.addParameter(correlationId) - .addParameter(workflowName) - .executeAndFetch(WorkflowModel.class)); - } - - @Override - public boolean canSearchAcrossWorkflows() { - return true; - } - - @Override - public boolean addEventExecution(EventExecution eventExecution) { - try { - return getWithRetriedTransactions(tx -> insertEventExecution(tx, eventExecution)); - } catch (Exception e) { - throw new ApplicationException( - ApplicationException.Code.BACKEND_ERROR, - "Unable to add event execution " + eventExecution.getId(), - e); - } - } - - @Override - public void removeEventExecution(EventExecution eventExecution) { - try { - withTransaction(tx -> removeEventExecution(tx, eventExecution)); - } catch (Exception e) { - throw new ApplicationException( - ApplicationException.Code.BACKEND_ERROR, - "Unable to remove event execution " + eventExecution.getId(), - e); - } - } - - @Override - public void updateEventExecution(EventExecution eventExecution) { - try { - withTransaction(tx -> updateEventExecution(tx, eventExecution)); - } catch (Exception e) { - throw new ApplicationException( - ApplicationException.Code.BACKEND_ERROR, - "Unable to update event execution " + eventExecution.getId(), - e); - } - } - - public List getEventExecutions( - String eventHandlerName, String eventName, String messageId, int max) { - try { - List executions = Lists.newLinkedList(); - withTransaction( - tx -> { - for (int i = 0; i < max; i++) { - String executionId = - messageId + "_" - + i; // see SimpleEventProcessor.handle to understand - // how the - // execution id is set - EventExecution ee = - readEventExecution( - tx, - eventHandlerName, - eventName, - messageId, - executionId); - if (ee == null) { - break; - } - executions.add(ee); - } - }); - return executions; - } catch (Exception e) { - String message = - String.format( - "Unable to get event executions for eventHandlerName=%s, eventName=%s, messageId=%s", - eventHandlerName, eventName, messageId); - throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, message, e); - } - } - - @Override - public void updateLastPollData(String taskDefName, String domain, String workerId) { - Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); - PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis()); - String effectiveDomain = (domain == null) ? 
"DEFAULT" : domain; - withTransaction(tx -> insertOrUpdatePollData(tx, pollData, effectiveDomain)); - } - - @Override - public PollData getPollData(String taskDefName, String domain) { - Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); - String effectiveDomain = (domain == null) ? "DEFAULT" : domain; - return getWithRetriedTransactions(tx -> readPollData(tx, taskDefName, effectiveDomain)); - } - - @Override - public List getPollData(String taskDefName) { - Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); - return readAllPollData(taskDefName); - } - - @Override - public List getAllPollData() { - try (Connection tx = dataSource.getConnection()) { - boolean previousAutoCommitMode = tx.getAutoCommit(); - tx.setAutoCommit(true); - try { - String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data ORDER BY queue_name"; - return query(tx, GET_ALL_POLL_DATA, q -> q.executeAndFetch(PollData.class)); - } catch (Throwable th) { - throw new ApplicationException(BACKEND_ERROR, th.getMessage(), th); - } finally { - tx.setAutoCommit(previousAutoCommitMode); - } - } catch (SQLException ex) { - throw new ApplicationException(BACKEND_ERROR, ex.getMessage(), ex); - } - } - - private List getTasks(Connection connection, List taskIds) { - if (taskIds.isEmpty()) { - return Lists.newArrayList(); - } - - // Generate a formatted query string with a variable number of bind params based - // on taskIds.size() - final String GET_TASKS_FOR_IDS = - String.format( - "SELECT json_data FROM task WHERE task_id IN (%s) AND json_data IS NOT NULL", - Query.generateInBindings(taskIds.size())); - - return query( - connection, - GET_TASKS_FOR_IDS, - q -> q.addParameters(taskIds).executeAndFetch(TaskModel.class)); - } - - private String insertOrUpdateWorkflow(WorkflowModel workflow, boolean update) { - Preconditions.checkNotNull(workflow, "workflow object cannot be null"); - - boolean terminal = workflow.getStatus().isTerminal(); - - List tasks = workflow.getTasks(); - workflow.setTasks(Lists.newLinkedList()); - - withTransaction( - tx -> { - if (!update) { - addWorkflow(tx, workflow); - addWorkflowDefToWorkflowMapping(tx, workflow); - } else { - updateWorkflow(tx, workflow); - } - - if (terminal) { - removePendingWorkflow( - tx, workflow.getWorkflowName(), workflow.getWorkflowId()); - } else { - addPendingWorkflow( - tx, workflow.getWorkflowName(), workflow.getWorkflowId()); - } - }); - - workflow.setTasks(tasks); - return workflow.getWorkflowId(); - } - - private void updateTask(Connection connection, TaskModel task) { - Optional taskDefinition = task.getTaskDefinition(); - - if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) { - boolean inProgress = - task.getStatus() != null - && task.getStatus().equals(TaskModel.Status.IN_PROGRESS); - updateInProgressStatus(connection, task, inProgress); - } - - insertOrUpdateTaskData(connection, task); - - if (task.getStatus() != null && task.getStatus().isTerminal()) { - removeTaskInProgress(connection, task); - } - - addWorkflowToTaskMapping(connection, task); - } - - private WorkflowModel readWorkflow(Connection connection, String workflowId) { - String GET_WORKFLOW = "SELECT json_data FROM workflow WHERE workflow_id = ?"; - - return query( - connection, - GET_WORKFLOW, - q -> q.addParameter(workflowId).executeAndFetchFirst(WorkflowModel.class)); - } - - private void addWorkflow(Connection connection, WorkflowModel workflow) { - String INSERT_WORKFLOW = - "INSERT INTO workflow (workflow_id, correlation_id, 
json_data) VALUES (?, ?, ?)"; - - execute( - connection, - INSERT_WORKFLOW, - q -> - q.addParameter(workflow.getWorkflowId()) - .addParameter(workflow.getCorrelationId()) - .addJsonParameter(workflow) - .executeUpdate()); - } - - private void updateWorkflow(Connection connection, WorkflowModel workflow) { - String UPDATE_WORKFLOW = - "UPDATE workflow SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE workflow_id = ?"; - - execute( - connection, - UPDATE_WORKFLOW, - q -> - q.addJsonParameter(workflow) - .addParameter(workflow.getWorkflowId()) - .executeUpdate()); - } - - private void removeWorkflow(Connection connection, String workflowId) { - String REMOVE_WORKFLOW = "DELETE FROM workflow WHERE workflow_id = ?"; - execute(connection, REMOVE_WORKFLOW, q -> q.addParameter(workflowId).executeDelete()); - } - - private void addPendingWorkflow(Connection connection, String workflowType, String workflowId) { - - String EXISTS_PENDING_WORKFLOW = - "SELECT EXISTS(SELECT 1 FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?)"; - - boolean exists = - query( - connection, - EXISTS_PENDING_WORKFLOW, - q -> q.addParameter(workflowType).addParameter(workflowId).exists()); - - if (!exists) { - String INSERT_PENDING_WORKFLOW = - "INSERT IGNORE INTO workflow_pending (workflow_type, workflow_id) VALUES (?, ?)"; - - execute( - connection, - INSERT_PENDING_WORKFLOW, - q -> q.addParameter(workflowType).addParameter(workflowId).executeUpdate()); - } - } - - private void removePendingWorkflow( - Connection connection, String workflowType, String workflowId) { - String REMOVE_PENDING_WORKFLOW = - "DELETE FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?"; - - execute( - connection, - REMOVE_PENDING_WORKFLOW, - q -> q.addParameter(workflowType).addParameter(workflowId).executeDelete()); - } - - private void insertOrUpdateTaskData(Connection connection, TaskModel task) { - /* - * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON DUPLICATE KEY update' sql statement. The problem with that - * is that if we try the INSERT first, the sequence will be increased even if the ON DUPLICATE KEY happens. - */ - String UPDATE_TASK = - "UPDATE task SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE task_id=?"; - int rowsUpdated = - query( - connection, - UPDATE_TASK, - q -> - q.addJsonParameter(task) - .addParameter(task.getTaskId()) - .executeUpdate()); - - if (rowsUpdated == 0) { - String INSERT_TASK = - "INSERT INTO task (task_id, json_data, modified_on) VALUES (?, ?, CURRENT_TIMESTAMP) ON DUPLICATE KEY UPDATE json_data=VALUES(json_data), modified_on=VALUES(modified_on)"; - execute( - connection, - INSERT_TASK, - q -> q.addParameter(task.getTaskId()).addJsonParameter(task).executeUpdate()); - } - } - - private void removeTaskData(Connection connection, TaskModel task) { - String REMOVE_TASK = "DELETE FROM task WHERE task_id = ?"; - execute(connection, REMOVE_TASK, q -> q.addParameter(task.getTaskId()).executeDelete()); - } - - private void addWorkflowToTaskMapping(Connection connection, TaskModel task) { - - String EXISTS_WORKFLOW_TO_TASK = - "SELECT EXISTS(SELECT 1 FROM workflow_to_task WHERE workflow_id = ? 
AND task_id = ?)"; - - boolean exists = - query( - connection, - EXISTS_WORKFLOW_TO_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(task.getTaskId()) - .exists()); - - if (!exists) { - String INSERT_WORKFLOW_TO_TASK = - "INSERT IGNORE INTO workflow_to_task (workflow_id, task_id) VALUES (?, ?)"; - - execute( - connection, - INSERT_WORKFLOW_TO_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(task.getTaskId()) - .executeUpdate()); - } - } - - private void removeWorkflowToTaskMapping(Connection connection, TaskModel task) { - String REMOVE_WORKFLOW_TO_TASK = - "DELETE FROM workflow_to_task WHERE workflow_id = ? AND task_id = ?"; - - execute( - connection, - REMOVE_WORKFLOW_TO_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(task.getTaskId()) - .executeDelete()); - } - - private void addWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) { - String INSERT_WORKFLOW_DEF_TO_WORKFLOW = - "INSERT INTO workflow_def_to_workflow (workflow_def, date_str, workflow_id) VALUES (?, ?, ?)"; - - execute( - connection, - INSERT_WORKFLOW_DEF_TO_WORKFLOW, - q -> - q.addParameter(workflow.getWorkflowName()) - .addParameter(dateStr(workflow.getCreateTime())) - .addParameter(workflow.getWorkflowId()) - .executeUpdate()); - } - - private void removeWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) { - String REMOVE_WORKFLOW_DEF_TO_WORKFLOW = - "DELETE FROM workflow_def_to_workflow WHERE workflow_def = ? AND date_str = ? AND workflow_id = ?"; - - execute( - connection, - REMOVE_WORKFLOW_DEF_TO_WORKFLOW, - q -> - q.addParameter(workflow.getWorkflowName()) - .addParameter(dateStr(workflow.getCreateTime())) - .addParameter(workflow.getWorkflowId()) - .executeUpdate()); - } - - @VisibleForTesting - boolean addScheduledTask(Connection connection, TaskModel task, String taskKey) { - - final String EXISTS_SCHEDULED_TASK = - "SELECT EXISTS(SELECT 1 FROM task_scheduled where workflow_id = ? AND task_key = ?)"; - - boolean exists = - query( - connection, - EXISTS_SCHEDULED_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(taskKey) - .exists()); - - if (!exists) { - final String INSERT_IGNORE_SCHEDULED_TASK = - "INSERT IGNORE INTO task_scheduled (workflow_id, task_key, task_id) VALUES (?, ?, ?)"; - - int count = - query( - connection, - INSERT_IGNORE_SCHEDULED_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(taskKey) - .addParameter(task.getTaskId()) - .executeUpdate()); - return count > 0; - } else { - return false; - } - } - - private void removeScheduledTask(Connection connection, TaskModel task, String taskKey) { - String REMOVE_SCHEDULED_TASK = - "DELETE FROM task_scheduled WHERE workflow_id = ? AND task_key = ?"; - execute( - connection, - REMOVE_SCHEDULED_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(taskKey) - .executeDelete()); - } - - private void addTaskInProgress(Connection connection, TaskModel task) { - String EXISTS_IN_PROGRESS_TASK = - "SELECT EXISTS(SELECT 1 FROM task_in_progress WHERE task_def_name = ? 
AND task_id = ?)"; - - boolean exists = - query( - connection, - EXISTS_IN_PROGRESS_TASK, - q -> - q.addParameter(task.getTaskDefName()) - .addParameter(task.getTaskId()) - .exists()); - - if (!exists) { - String INSERT_IN_PROGRESS_TASK = - "INSERT INTO task_in_progress (task_def_name, task_id, workflow_id) VALUES (?, ?, ?)"; - - execute( - connection, - INSERT_IN_PROGRESS_TASK, - q -> - q.addParameter(task.getTaskDefName()) - .addParameter(task.getTaskId()) - .addParameter(task.getWorkflowInstanceId()) - .executeUpdate()); - } - } - - private void removeTaskInProgress(Connection connection, TaskModel task) { - String REMOVE_IN_PROGRESS_TASK = - "DELETE FROM task_in_progress WHERE task_def_name = ? AND task_id = ?"; - - execute( - connection, - REMOVE_IN_PROGRESS_TASK, - q -> - q.addParameter(task.getTaskDefName()) - .addParameter(task.getTaskId()) - .executeUpdate()); - } - - private void updateInProgressStatus(Connection connection, TaskModel task, boolean inProgress) { - String UPDATE_IN_PROGRESS_TASK_STATUS = - "UPDATE task_in_progress SET in_progress_status = ?, modified_on = CURRENT_TIMESTAMP " - + "WHERE task_def_name = ? AND task_id = ?"; - - execute( - connection, - UPDATE_IN_PROGRESS_TASK_STATUS, - q -> - q.addParameter(inProgress) - .addParameter(task.getTaskDefName()) - .addParameter(task.getTaskId()) - .executeUpdate()); - } - - private boolean insertEventExecution(Connection connection, EventExecution eventExecution) { - - String INSERT_EVENT_EXECUTION = - "INSERT INTO event_execution (event_handler_name, event_name, message_id, execution_id, json_data) " - + "VALUES (?, ?, ?, ?, ?)"; - int count = - query( - connection, - INSERT_EVENT_EXECUTION, - q -> - q.addParameter(eventExecution.getName()) - .addParameter(eventExecution.getEvent()) - .addParameter(eventExecution.getMessageId()) - .addParameter(eventExecution.getId()) - .addJsonParameter(eventExecution) - .executeUpdate()); - return count > 0; - } - - private void updateEventExecution(Connection connection, EventExecution eventExecution) { - // @formatter:off - String UPDATE_EVENT_EXECUTION = - "UPDATE event_execution SET " - + "json_data = ?, " - + "modified_on = CURRENT_TIMESTAMP " - + "WHERE event_handler_name = ? " - + "AND event_name = ? " - + "AND message_id = ? " - + "AND execution_id = ?"; - // @formatter:on - - execute( - connection, - UPDATE_EVENT_EXECUTION, - q -> - q.addJsonParameter(eventExecution) - .addParameter(eventExecution.getName()) - .addParameter(eventExecution.getEvent()) - .addParameter(eventExecution.getMessageId()) - .addParameter(eventExecution.getId()) - .executeUpdate()); - } - - private void removeEventExecution(Connection connection, EventExecution eventExecution) { - String REMOVE_EVENT_EXECUTION = - "DELETE FROM event_execution " - + "WHERE event_handler_name = ? " - + "AND event_name = ? " - + "AND message_id = ? " - + "AND execution_id = ?"; - - execute( - connection, - REMOVE_EVENT_EXECUTION, - q -> - q.addParameter(eventExecution.getName()) - .addParameter(eventExecution.getEvent()) - .addParameter(eventExecution.getMessageId()) - .addParameter(eventExecution.getId()) - .executeUpdate()); - } - - private EventExecution readEventExecution( - Connection connection, - String eventHandlerName, - String eventName, - String messageId, - String executionId) { - // @formatter:off - String GET_EVENT_EXECUTION = - "SELECT json_data FROM event_execution " - + "WHERE event_handler_name = ? " - + "AND event_name = ? " - + "AND message_id = ? 
" - + "AND execution_id = ?"; - // @formatter:on - return query( - connection, - GET_EVENT_EXECUTION, - q -> - q.addParameter(eventHandlerName) - .addParameter(eventName) - .addParameter(messageId) - .addParameter(executionId) - .executeAndFetchFirst(EventExecution.class)); - } - - private void insertOrUpdatePollData(Connection connection, PollData pollData, String domain) { - - /* - * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON DUPLICATE KEY update' sql statement. The problem with that - * is that if we try the INSERT first, the sequence will be increased even if the ON DUPLICATE KEY happens. Since polling happens *a lot*, the sequence can increase - * dramatically even though it won't be used. - */ - String UPDATE_POLL_DATA = - "UPDATE poll_data SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE queue_name=? AND domain=?"; - int rowsUpdated = - query( - connection, - UPDATE_POLL_DATA, - q -> - q.addJsonParameter(pollData) - .addParameter(pollData.getQueueName()) - .addParameter(domain) - .executeUpdate()); - - if (rowsUpdated == 0) { - String INSERT_POLL_DATA = - "INSERT INTO poll_data (queue_name, domain, json_data, modified_on) VALUES (?, ?, ?, CURRENT_TIMESTAMP) ON DUPLICATE KEY UPDATE json_data=VALUES(json_data), modified_on=VALUES(modified_on)"; - execute( - connection, - INSERT_POLL_DATA, - q -> - q.addParameter(pollData.getQueueName()) - .addParameter(domain) - .addJsonParameter(pollData) - .executeUpdate()); - } - } - - private PollData readPollData(Connection connection, String queueName, String domain) { - String GET_POLL_DATA = - "SELECT json_data FROM poll_data WHERE queue_name = ? AND domain = ?"; - return query( - connection, - GET_POLL_DATA, - q -> - q.addParameter(queueName) - .addParameter(domain) - .executeAndFetchFirst(PollData.class)); - } - - private List readAllPollData(String queueName) { - String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data WHERE queue_name = ?"; - return queryWithTransaction( - GET_ALL_POLL_DATA, q -> q.addParameter(queueName).executeAndFetch(PollData.class)); - } - - private List findAllTasksInProgressInOrderOfArrival(TaskModel task, int limit) { - String GET_IN_PROGRESS_TASKS_WITH_LIMIT = - "SELECT task_id FROM task_in_progress WHERE task_def_name = ? ORDER BY created_on LIMIT ?"; - - return queryWithTransaction( - GET_IN_PROGRESS_TASKS_WITH_LIMIT, - q -> - q.addParameter(task.getTaskDefName()) - .addParameter(limit) - .executeScalarList(String.class)); - } - - private void validate(TaskModel task) { - Preconditions.checkNotNull(task, "task object cannot be null"); - Preconditions.checkNotNull(task.getTaskId(), "Task id cannot be null"); - Preconditions.checkNotNull( - task.getWorkflowInstanceId(), "Workflow instance id cannot be null"); - Preconditions.checkNotNull( - task.getReferenceTaskName(), "Task reference name cannot be null"); - } -} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAO.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAO.java deleted file mode 100644 index ca26a51661..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAO.java +++ /dev/null @@ -1,555 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.dao; - -import java.sql.Connection; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Optional; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -import javax.sql.DataSource; - -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.common.metadata.events.EventHandler; -import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.common.metadata.workflow.WorkflowDef; -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.dao.EventHandlerDAO; -import com.netflix.conductor.dao.MetadataDAO; -import com.netflix.conductor.metrics.Monitors; -import com.netflix.conductor.mysql.config.MySQLProperties; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Preconditions; - -public class MySQLMetadataDAO extends MySQLBaseDAO implements MetadataDAO, EventHandlerDAO { - - private final ConcurrentHashMap taskDefCache = new ConcurrentHashMap<>(); - private static final String CLASS_NAME = MySQLMetadataDAO.class.getSimpleName(); - - public MySQLMetadataDAO( - RetryTemplate retryTemplate, - ObjectMapper objectMapper, - DataSource dataSource, - MySQLProperties properties) { - super(retryTemplate, objectMapper, dataSource); - - long cacheRefreshTime = properties.getTaskDefCacheRefreshInterval().getSeconds(); - Executors.newSingleThreadScheduledExecutor() - .scheduleWithFixedDelay( - this::refreshTaskDefs, - cacheRefreshTime, - cacheRefreshTime, - TimeUnit.SECONDS); - } - - @Override - public void createTaskDef(TaskDef taskDef) { - validate(taskDef); - insertOrUpdateTaskDef(taskDef); - } - - @Override - public String updateTaskDef(TaskDef taskDef) { - validate(taskDef); - return insertOrUpdateTaskDef(taskDef); - } - - @Override - public TaskDef getTaskDef(String name) { - Preconditions.checkNotNull(name, "TaskDef name cannot be null"); - TaskDef taskDef = taskDefCache.get(name); - if (taskDef == null) { - if (logger.isTraceEnabled()) { - logger.trace("Cache miss: {}", name); - } - taskDef = getTaskDefFromDB(name); - } - - return taskDef; - } - - @Override - public List getAllTaskDefs() { - return getWithRetriedTransactions(this::findAllTaskDefs); - } - - @Override - public void removeTaskDef(String name) { - final String DELETE_TASKDEF_QUERY = "DELETE FROM meta_task_def WHERE name = ?"; - - executeWithTransaction( - DELETE_TASKDEF_QUERY, - q -> { - if (!q.addParameter(name).executeDelete()) { - throw new ApplicationException( - ApplicationException.Code.NOT_FOUND, "No such task definition"); - } - - taskDefCache.remove(name); - }); - } - - @Override - public void createWorkflowDef(WorkflowDef def) { - validate(def); - - withTransaction( - tx -> { - if (workflowExists(tx, def)) { - throw new ApplicationException( - ApplicationException.Code.CONFLICT, - "Workflow with " + def.key() + " already exists!"); - } - - insertOrUpdateWorkflowDef(tx, def); - }); - } - - @Override - public void updateWorkflowDef(WorkflowDef def) { - validate(def); - withTransaction(tx -> 
insertOrUpdateWorkflowDef(tx, def)); - } - - @Override - public Optional getLatestWorkflowDef(String name) { - final String GET_LATEST_WORKFLOW_DEF_QUERY = - "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND " - + "version = latest_version"; - - return Optional.ofNullable( - queryWithTransaction( - GET_LATEST_WORKFLOW_DEF_QUERY, - q -> q.addParameter(name).executeAndFetchFirst(WorkflowDef.class))); - } - - @Override - public Optional getWorkflowDef(String name, int version) { - final String GET_WORKFLOW_DEF_QUERY = - "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND version = ?"; - return Optional.ofNullable( - queryWithTransaction( - GET_WORKFLOW_DEF_QUERY, - q -> - q.addParameter(name) - .addParameter(version) - .executeAndFetchFirst(WorkflowDef.class))); - } - - @Override - public void removeWorkflowDef(String name, Integer version) { - final String DELETE_WORKFLOW_QUERY = - "DELETE from meta_workflow_def WHERE name = ? AND version = ?"; - - withTransaction( - tx -> { - // remove specified workflow - execute( - tx, - DELETE_WORKFLOW_QUERY, - q -> { - if (!q.addParameter(name).addParameter(version).executeDelete()) { - throw new ApplicationException( - ApplicationException.Code.NOT_FOUND, - String.format( - "No such workflow definition: %s version: %d", - name, version)); - } - }); - // reset latest version based on remaining rows for this workflow - Optional maxVersion = getLatestVersion(tx, name); - maxVersion.ifPresent(newVersion -> updateLatestVersion(tx, name, newVersion)); - }); - } - - public List findAll() { - final String FIND_ALL_WORKFLOW_DEF_QUERY = "SELECT DISTINCT name FROM meta_workflow_def"; - return queryWithTransaction( - FIND_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(String.class)); - } - - @Override - public List getAllWorkflowDefs() { - final String GET_ALL_WORKFLOW_DEF_QUERY = - "SELECT json_data FROM meta_workflow_def ORDER BY name, version"; - - return queryWithTransaction( - GET_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class)); - } - - public List getAllLatest() { - final String GET_ALL_LATEST_WORKFLOW_DEF_QUERY = - "SELECT json_data FROM meta_workflow_def WHERE version = " + "latest_version"; - - return queryWithTransaction( - GET_ALL_LATEST_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class)); - } - - public List getAllVersions(String name) { - final String GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY = - "SELECT json_data FROM meta_workflow_def WHERE name = ? 
" + "ORDER BY version"; - - return queryWithTransaction( - GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY, - q -> q.addParameter(name).executeAndFetch(WorkflowDef.class)); - } - - @Override - public void addEventHandler(EventHandler eventHandler) { - Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null"); - - final String INSERT_EVENT_HANDLER_QUERY = - "INSERT INTO meta_event_handler (name, event, active, json_data) " - + "VALUES (?, ?, ?, ?)"; - - withTransaction( - tx -> { - if (getEventHandler(tx, eventHandler.getName()) != null) { - throw new ApplicationException( - ApplicationException.Code.CONFLICT, - "EventHandler with name " - + eventHandler.getName() - + " already exists!"); - } - - execute( - tx, - INSERT_EVENT_HANDLER_QUERY, - q -> - q.addParameter(eventHandler.getName()) - .addParameter(eventHandler.getEvent()) - .addParameter(eventHandler.isActive()) - .addJsonParameter(eventHandler) - .executeUpdate()); - }); - } - - @Override - public void updateEventHandler(EventHandler eventHandler) { - Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null"); - - // @formatter:off - final String UPDATE_EVENT_HANDLER_QUERY = - "UPDATE meta_event_handler SET " - + "event = ?, active = ?, json_data = ?, " - + "modified_on = CURRENT_TIMESTAMP WHERE name = ?"; - // @formatter:on - - withTransaction( - tx -> { - EventHandler existing = getEventHandler(tx, eventHandler.getName()); - if (existing == null) { - throw new ApplicationException( - ApplicationException.Code.NOT_FOUND, - "EventHandler with name " + eventHandler.getName() + " not found!"); - } - - execute( - tx, - UPDATE_EVENT_HANDLER_QUERY, - q -> - q.addParameter(eventHandler.getEvent()) - .addParameter(eventHandler.isActive()) - .addJsonParameter(eventHandler) - .addParameter(eventHandler.getName()) - .executeUpdate()); - }); - } - - @Override - public void removeEventHandler(String name) { - final String DELETE_EVENT_HANDLER_QUERY = "DELETE FROM meta_event_handler WHERE name = ?"; - - withTransaction( - tx -> { - EventHandler existing = getEventHandler(tx, name); - if (existing == null) { - throw new ApplicationException( - ApplicationException.Code.NOT_FOUND, - "EventHandler with name " + name + " not found!"); - } - - execute( - tx, - DELETE_EVENT_HANDLER_QUERY, - q -> q.addParameter(name).executeDelete()); - }); - } - - @Override - public List getAllEventHandlers() { - final String READ_ALL_EVENT_HANDLER_QUERY = "SELECT json_data FROM meta_event_handler"; - return queryWithTransaction( - READ_ALL_EVENT_HANDLER_QUERY, q -> q.executeAndFetch(EventHandler.class)); - } - - @Override - public List getEventHandlersForEvent(String event, boolean activeOnly) { - final String READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY = - "SELECT json_data FROM meta_event_handler WHERE event = ?"; - return queryWithTransaction( - READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY, - q -> { - q.addParameter(event); - return q.executeAndFetch( - rs -> { - List handlers = new ArrayList<>(); - while (rs.next()) { - EventHandler h = readValue(rs.getString(1), EventHandler.class); - if (!activeOnly || h.isActive()) { - handlers.add(h); - } - } - - return handlers; - }); - }); - } - - /** - * Use {@link Preconditions} to check for required {@link TaskDef} fields, throwing a Runtime - * exception if validations fail. - * - * @param taskDef The {@code TaskDef} to check. 
- */ - private void validate(TaskDef taskDef) { - Preconditions.checkNotNull(taskDef, "TaskDef object cannot be null"); - Preconditions.checkNotNull(taskDef.getName(), "TaskDef name cannot be null"); - } - - /** - * Use {@link Preconditions} to check for required {@link WorkflowDef} fields, throwing a - * Runtime exception if validations fail. - * - * @param def The {@code WorkflowDef} to check. - */ - private void validate(WorkflowDef def) { - Preconditions.checkNotNull(def, "WorkflowDef object cannot be null"); - Preconditions.checkNotNull(def.getName(), "WorkflowDef name cannot be null"); - } - - /** - * Retrieve a {@link EventHandler} by {@literal name}. - * - * @param connection The {@link Connection} to use for queries. - * @param name The {@code EventHandler} name to look for. - * @return {@literal null} if nothing is found, otherwise the {@code EventHandler}. - */ - private EventHandler getEventHandler(Connection connection, String name) { - final String READ_ONE_EVENT_HANDLER_QUERY = - "SELECT json_data FROM meta_event_handler WHERE name = ?"; - - return query( - connection, - READ_ONE_EVENT_HANDLER_QUERY, - q -> q.addParameter(name).executeAndFetchFirst(EventHandler.class)); - } - - /** - * Check if a {@link WorkflowDef} with the same {@literal name} and {@literal version} already - * exist. - * - * @param connection The {@link Connection} to use for queries. - * @param def The {@code WorkflowDef} to check for. - * @return {@literal true} if a {@code WorkflowDef} already exists with the same values. - */ - private Boolean workflowExists(Connection connection, WorkflowDef def) { - final String CHECK_WORKFLOW_DEF_EXISTS_QUERY = - "SELECT COUNT(*) FROM meta_workflow_def WHERE name = ? AND " + "version = ?"; - - return query( - connection, - CHECK_WORKFLOW_DEF_EXISTS_QUERY, - q -> q.addParameter(def.getName()).addParameter(def.getVersion()).exists()); - } - - /** - * Return the latest version that exists for the provided {@code name}. - * - * @param tx The {@link Connection} to use for queries. - * @param name The {@code name} to check for. - * @return {@code Optional.empty()} if no versions exist, otherwise the max {@link - * WorkflowDef#getVersion} found. - */ - private Optional getLatestVersion(Connection tx, String name) { - final String GET_LATEST_WORKFLOW_DEF_VERSION = - "SELECT max(version) AS version FROM meta_workflow_def WHERE " + "name = ?"; - - Integer val = - query( - tx, - GET_LATEST_WORKFLOW_DEF_VERSION, - q -> { - q.addParameter(name); - return q.executeAndFetch( - rs -> { - if (!rs.next()) { - return null; - } - - return rs.getInt(1); - }); - }); - - return Optional.ofNullable(val); - } - - /** - * Update the latest version for the workflow with name {@code WorkflowDef} to the version - * provided in {@literal version}. - * - * @param tx The {@link Connection} to use for queries. - * @param name Workflow def name to update - * @param version The new latest {@code version} value. - */ - private void updateLatestVersion(Connection tx, String name, int version) { - final String UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY = - "UPDATE meta_workflow_def SET latest_version = ? 
" + "WHERE name = ?"; - - execute( - tx, - UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY, - q -> q.addParameter(version).addParameter(name).executeUpdate()); - } - - private void insertOrUpdateWorkflowDef(Connection tx, WorkflowDef def) { - final String INSERT_WORKFLOW_DEF_QUERY = - "INSERT INTO meta_workflow_def (name, version, json_data) VALUES (?," + " ?, ?)"; - - Optional version = getLatestVersion(tx, def.getName()); - if (!workflowExists(tx, def)) { - execute( - tx, - INSERT_WORKFLOW_DEF_QUERY, - q -> - q.addParameter(def.getName()) - .addParameter(def.getVersion()) - .addJsonParameter(def) - .executeUpdate()); - } else { - // @formatter:off - final String UPDATE_WORKFLOW_DEF_QUERY = - "UPDATE meta_workflow_def " - + "SET json_data = ?, modified_on = CURRENT_TIMESTAMP " - + "WHERE name = ? AND version = ?"; - // @formatter:on - - execute( - tx, - UPDATE_WORKFLOW_DEF_QUERY, - q -> - q.addJsonParameter(def) - .addParameter(def.getName()) - .addParameter(def.getVersion()) - .executeUpdate()); - } - int maxVersion = def.getVersion(); - if (version.isPresent() && version.get() > def.getVersion()) { - maxVersion = version.get(); - } - - updateLatestVersion(tx, def.getName(), maxVersion); - } - - /** - * Query persistence for all defined {@link TaskDef} data, and cache it in {@link - * #taskDefCache}. - */ - private void refreshTaskDefs() { - try { - withTransaction( - tx -> { - Map map = new HashMap<>(); - findAllTaskDefs(tx).forEach(taskDef -> map.put(taskDef.getName(), taskDef)); - - synchronized (taskDefCache) { - taskDefCache.clear(); - taskDefCache.putAll(map); - } - - if (logger.isTraceEnabled()) { - logger.trace("Refreshed {} TaskDefs", taskDefCache.size()); - } - }); - } catch (Exception e) { - Monitors.error(CLASS_NAME, "refreshTaskDefs"); - logger.error("refresh TaskDefs failed ", e); - } - } - - /** - * Query persistence for all defined {@link TaskDef} data. - * - * @param tx The {@link Connection} to use for queries. - * @return A new {@code List} with all the {@code TaskDef} data that was retrieved. - */ - private List findAllTaskDefs(Connection tx) { - final String READ_ALL_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def"; - - return query(tx, READ_ALL_TASKDEF_QUERY, q -> q.executeAndFetch(TaskDef.class)); - } - - /** - * Explicitly retrieves a {@link TaskDef} from persistence, avoiding {@link #taskDefCache}. - * - * @param name The name of the {@code TaskDef} to query for. - * @return {@literal null} if nothing is found, otherwise the {@code TaskDef}. 
- */ - private TaskDef getTaskDefFromDB(String name) { - final String READ_ONE_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def WHERE name = ?"; - - return queryWithTransaction( - READ_ONE_TASKDEF_QUERY, - q -> q.addParameter(name).executeAndFetchFirst(TaskDef.class)); - } - - private String insertOrUpdateTaskDef(TaskDef taskDef) { - final String UPDATE_TASKDEF_QUERY = - "UPDATE meta_task_def SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE name = ?"; - - final String INSERT_TASKDEF_QUERY = - "INSERT INTO meta_task_def (name, json_data) VALUES (?, ?)"; - - return getWithRetriedTransactions( - tx -> { - execute( - tx, - UPDATE_TASKDEF_QUERY, - update -> { - int result = - update.addJsonParameter(taskDef) - .addParameter(taskDef.getName()) - .executeUpdate(); - if (result == 0) { - execute( - tx, - INSERT_TASKDEF_QUERY, - insert -> - insert.addParameter(taskDef.getName()) - .addJsonParameter(taskDef) - .executeUpdate()); - } - }); - - taskDefCache.put(taskDef.getName(), taskDef); - return taskDef.getName(); - }); - } -} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLQueueDAO.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLQueueDAO.java deleted file mode 100644 index cd068a228b..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/dao/MySQLQueueDAO.java +++ /dev/null @@ -1,395 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
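The task-definition cache in the metadata DAO above (taskDefCache plus refreshTaskDefs on a scheduleWithFixedDelay timer) trades staleness for read speed: lookups are served from memory, may lag the database by up to one refresh interval, and getTaskDef falls back to the database on a miss. A self-contained sketch of that pattern, assuming a caller-supplied loader; the RefreshingCache name is illustrative:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;
    import java.util.function.Supplier;

    public final class RefreshingCache<V> {

        private final ConcurrentHashMap<String, V> cache = new ConcurrentHashMap<>();
        private final Supplier<Map<String, V>> loader;

        public RefreshingCache(Supplier<Map<String, V>> loader, long refreshSeconds) {
            this.loader = loader;
            // Same scheduling shape as the DAO constructor: one thread, fixed
            // delay, so a slow refresh never overlaps the next one.
            Executors.newSingleThreadScheduledExecutor()
                    .scheduleWithFixedDelay(
                            this::refresh, refreshSeconds, refreshSeconds, TimeUnit.SECONDS);
        }

        private void refresh() {
            try {
                Map<String, V> fresh = loader.get();
                synchronized (cache) { // swap contents atomically w.r.t. other refreshes
                    cache.clear();
                    cache.putAll(fresh);
                }
            } catch (Exception e) {
                // keep serving the previous snapshot when a refresh fails
            }
        }

        // Serve from memory; fall back to the authoritative source on a miss,
        // the way getTaskDef falls back to getTaskDefFromDB.
        public V get(String name, Supplier<V> onMiss) {
            V value = cache.get(name);
            return value != null ? value : onMiss.get();
        }
    }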
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.dao; - -import java.sql.Connection; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Map; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import javax.sql.DataSource; - -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.core.events.queue.Message; -import com.netflix.conductor.dao.QueueDAO; -import com.netflix.conductor.mysql.util.Query; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Maps; -import com.google.common.util.concurrent.Uninterruptibles; - -public class MySQLQueueDAO extends MySQLBaseDAO implements QueueDAO { - - private static final Long UNACK_SCHEDULE_MS = 60_000L; - - public MySQLQueueDAO( - RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) { - super(retryTemplate, objectMapper, dataSource); - - Executors.newSingleThreadScheduledExecutor() - .scheduleAtFixedRate( - this::processAllUnacks, - UNACK_SCHEDULE_MS, - UNACK_SCHEDULE_MS, - TimeUnit.MILLISECONDS); - logger.debug(MySQLQueueDAO.class.getName() + " is ready to serve"); - } - - @Override - public void push(String queueName, String messageId, long offsetTimeInSecond) { - push(queueName, messageId, 0, offsetTimeInSecond); - } - - @Override - public void push(String queueName, String messageId, int priority, long offsetTimeInSecond) { - withTransaction( - tx -> pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond)); - } - - @Override - public void push(String queueName, List messages) { - withTransaction( - tx -> - messages.forEach( - message -> - pushMessage( - tx, - queueName, - message.getId(), - message.getPayload(), - message.getPriority(), - 0))); - } - - @Override - public boolean pushIfNotExists(String queueName, String messageId, long offsetTimeInSecond) { - return pushIfNotExists(queueName, messageId, 0, offsetTimeInSecond); - } - - @Override - public boolean pushIfNotExists( - String queueName, String messageId, int priority, long offsetTimeInSecond) { - return getWithRetriedTransactions( - tx -> { - if (!existsMessage(tx, queueName, messageId)) { - pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond); - return true; - } - return false; - }); - } - - @Override - public List pop(String queueName, int count, int timeout) { - List messages = - getWithTransactionWithOutErrorPropagation( - tx -> popMessages(tx, queueName, count, timeout)); - if (messages == null) { - return new ArrayList<>(); - } - return messages.stream().map(Message::getId).collect(Collectors.toList()); - } - - @Override - public List pollMessages(String queueName, int count, int timeout) { - List messages = - getWithTransactionWithOutErrorPropagation( - tx -> popMessages(tx, queueName, count, timeout)); - if (messages == null) { - return new ArrayList<>(); - } - return messages; - } - - @Override - public void remove(String queueName, String messageId) { - withTransaction(tx -> removeMessage(tx, queueName, messageId)); - } - - @Override - public int getSize(String queueName) { - 
final String GET_QUEUE_SIZE = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ?"; - return queryWithTransaction( - GET_QUEUE_SIZE, q -> ((Long) q.addParameter(queueName).executeCount()).intValue()); - } - - @Override - public boolean ack(String queueName, String messageId) { - return getWithRetriedTransactions(tx -> removeMessage(tx, queueName, messageId)); - } - - @Override - public boolean setUnackTimeout(String queueName, String messageId, long unackTimeout) { - long updatedOffsetTimeInSecond = unackTimeout / 1000; - - final String UPDATE_UNACK_TIMEOUT = - "UPDATE queue_message SET offset_time_seconds = ?, deliver_on = TIMESTAMPADD(SECOND, ?, CURRENT_TIMESTAMP) WHERE queue_name = ? AND message_id = ?"; - - return queryWithTransaction( - UPDATE_UNACK_TIMEOUT, - q -> - q.addParameter(updatedOffsetTimeInSecond) - .addParameter(updatedOffsetTimeInSecond) - .addParameter(queueName) - .addParameter(messageId) - .executeUpdate()) - == 1; - } - - @Override - public void flush(String queueName) { - final String FLUSH_QUEUE = "DELETE FROM queue_message WHERE queue_name = ?"; - executeWithTransaction(FLUSH_QUEUE, q -> q.addParameter(queueName).executeDelete()); - } - - @Override - public Map queuesDetail() { - final String GET_QUEUES_DETAIL = - "SELECT queue_name, (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size FROM queue q"; - return queryWithTransaction( - GET_QUEUES_DETAIL, - q -> - q.executeAndFetch( - rs -> { - Map detail = Maps.newHashMap(); - while (rs.next()) { - String queueName = rs.getString("queue_name"); - Long size = rs.getLong("size"); - detail.put(queueName, size); - } - return detail; - })); - } - - @Override - public Map>> queuesDetailVerbose() { - // @formatter:off - final String GET_QUEUES_DETAIL_VERBOSE = - "SELECT queue_name, \n" - + " (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size,\n" - + " (SELECT count(*) FROM queue_message WHERE popped = true AND queue_name = q.queue_name) AS uacked \n" - + "FROM queue q"; - // @formatter:on - - return queryWithTransaction( - GET_QUEUES_DETAIL_VERBOSE, - q -> - q.executeAndFetch( - rs -> { - Map>> result = - Maps.newHashMap(); - while (rs.next()) { - String queueName = rs.getString("queue_name"); - Long size = rs.getLong("size"); - Long queueUnacked = rs.getLong("uacked"); - result.put( - queueName, - ImmutableMap.of( - "a", - ImmutableMap - .of( // sharding not implemented, - // returning only - // one shard with all the - // info - "size", - size, - "uacked", - queueUnacked))); - } - return result; - })); - } - - /** - * Un-pop all un-acknowledged messages for all queues. - * - * @since 1.11.6 - */ - public void processAllUnacks() { - - logger.trace("processAllUnacks started"); - - final String PROCESS_ALL_UNACKS = - "UPDATE queue_message SET popped = false WHERE popped = true AND TIMESTAMPADD(SECOND,-60,CURRENT_TIMESTAMP) > deliver_on"; - executeWithTransaction(PROCESS_ALL_UNACKS, Query::executeUpdate); - } - - @Override - public void processUnacks(String queueName) { - final String PROCESS_UNACKS = - "UPDATE queue_message SET popped = false WHERE queue_name = ? 
AND popped = true AND TIMESTAMPADD(SECOND,-60,CURRENT_TIMESTAMP) > deliver_on"; - executeWithTransaction(PROCESS_UNACKS, q -> q.addParameter(queueName).executeUpdate()); - } - - @Override - public boolean resetOffsetTime(String queueName, String messageId) { - long offsetTimeInSecond = 0; // Reset to 0 - final String SET_OFFSET_TIME = - "UPDATE queue_message SET offset_time_seconds = ?, deliver_on = TIMESTAMPADD(SECOND,?,CURRENT_TIMESTAMP) \n" - + "WHERE queue_name = ? AND message_id = ?"; - - return queryWithTransaction( - SET_OFFSET_TIME, - q -> - q.addParameter(offsetTimeInSecond) - .addParameter(offsetTimeInSecond) - .addParameter(queueName) - .addParameter(messageId) - .executeUpdate() - == 1); - } - - private boolean existsMessage(Connection connection, String queueName, String messageId) { - final String EXISTS_MESSAGE = - "SELECT EXISTS(SELECT 1 FROM queue_message WHERE queue_name = ? AND message_id = ?)"; - return query( - connection, - EXISTS_MESSAGE, - q -> q.addParameter(queueName).addParameter(messageId).exists()); - } - - private void pushMessage( - Connection connection, - String queueName, - String messageId, - String payload, - Integer priority, - long offsetTimeInSecond) { - - createQueueIfNotExists(connection, queueName); - - String UPDATE_MESSAGE = - "UPDATE queue_message SET payload=?, deliver_on=TIMESTAMPADD(SECOND,?,CURRENT_TIMESTAMP) WHERE queue_name = ? AND message_id = ?"; - int rowsUpdated = - query( - connection, - UPDATE_MESSAGE, - q -> - q.addParameter(payload) - .addParameter(offsetTimeInSecond) - .addParameter(queueName) - .addParameter(messageId) - .executeUpdate()); - - if (rowsUpdated == 0) { - String PUSH_MESSAGE = - "INSERT INTO queue_message (deliver_on, queue_name, message_id, priority, offset_time_seconds, payload) VALUES (TIMESTAMPADD(SECOND,?,CURRENT_TIMESTAMP), ?, ?,?,?,?) ON DUPLICATE KEY UPDATE payload=VALUES(payload), deliver_on=VALUES(deliver_on)"; - execute( - connection, - PUSH_MESSAGE, - q -> - q.addParameter(offsetTimeInSecond) - .addParameter(queueName) - .addParameter(messageId) - .addParameter(priority) - .addParameter(offsetTimeInSecond) - .addParameter(payload) - .executeUpdate()); - } - } - - private boolean removeMessage(Connection connection, String queueName, String messageId) { - final String REMOVE_MESSAGE = - "DELETE FROM queue_message WHERE queue_name = ? AND message_id = ?"; - return query( - connection, - REMOVE_MESSAGE, - q -> q.addParameter(queueName).addParameter(messageId).executeDelete()); - } - - private List peekMessages(Connection connection, String queueName, int count) { - if (count < 1) { - return Collections.emptyList(); - } - - final String PEEK_MESSAGES = - "SELECT message_id, priority, payload FROM queue_message use index(combo_queue_message) WHERE queue_name = ? 
AND popped = false AND deliver_on <= TIMESTAMPADD(MICROSECOND, 1000, CURRENT_TIMESTAMP) ORDER BY priority DESC, deliver_on, created_on LIMIT ?"; - - return query( - connection, - PEEK_MESSAGES, - p -> - p.addParameter(queueName) - .addParameter(count) - .executeAndFetch( - rs -> { - List results = new ArrayList<>(); - while (rs.next()) { - Message m = new Message(); - m.setId(rs.getString("message_id")); - m.setPriority(rs.getInt("priority")); - m.setPayload(rs.getString("payload")); - results.add(m); - } - return results; - })); - } - - private List popMessages( - Connection connection, String queueName, int count, int timeout) { - long start = System.currentTimeMillis(); - List messages = peekMessages(connection, queueName, count); - - while (messages.size() < count && ((System.currentTimeMillis() - start) < timeout)) { - Uninterruptibles.sleepUninterruptibly(200, TimeUnit.MILLISECONDS); - messages = peekMessages(connection, queueName, count); - } - - if (messages.isEmpty()) { - return messages; - } - - List poppedMessages = new ArrayList<>(); - for (Message message : messages) { - final String POP_MESSAGE = - "UPDATE queue_message SET popped = true WHERE queue_name = ? AND message_id = ? AND popped = false"; - int result = - query( - connection, - POP_MESSAGE, - q -> - q.addParameter(queueName) - .addParameter(message.getId()) - .executeUpdate()); - - if (result == 1) { - poppedMessages.add(message); - } - } - return poppedMessages; - } - - private void createQueueIfNotExists(Connection connection, String queueName) { - logger.trace("Creating new queue '{}'", queueName); - final String EXISTS_QUEUE = "SELECT EXISTS(SELECT 1 FROM queue WHERE queue_name = ?)"; - boolean exists = query(connection, EXISTS_QUEUE, q -> q.addParameter(queueName).exists()); - if (!exists) { - final String CREATE_QUEUE = "INSERT IGNORE INTO queue (queue_name) VALUES (?)"; - execute(connection, CREATE_QUEUE, q -> q.addParameter(queueName).executeUpdate()); - } - } - - @Override - public boolean containsMessage(String queueName, String messageId) { - final String EXISTS_QUEUE = - "SELECT EXISTS(SELECT 1 FROM queue_message WHERE queue_name = ? AND message_id = ? )"; - return queryWithTransaction( - EXISTS_QUEUE, q -> q.addParameter(queueName).addParameter(messageId).exists()); - } -} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ExecuteFunction.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ExecuteFunction.java deleted file mode 100644 index e94d878f4e..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ExecuteFunction.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
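popMessages in the queue DAO above is an optimistic claim loop: peek up to count candidates, then attempt UPDATE ... SET popped = true ... AND popped = false per message, keeping only the rows this caller won; competing pollers lose the compare-and-set and drop the message. The claim step in isolation, assuming the queue_message table from this module; class and variable names are illustrative:

    import java.sql.Connection;
    import java.sql.PreparedStatement;
    import java.sql.SQLException;
    import java.util.ArrayList;
    import java.util.List;

    public final class OptimisticClaim {

        // Returns the subset of candidate ids this caller successfully claimed.
        // The "AND popped = false" predicate turns the UPDATE into a
        // compare-and-set: under concurrency, exactly one popper sees
        // executeUpdate() == 1 for any given row.
        public static List<String> claim(Connection cn, String queue, List<String> candidateIds)
                throws SQLException {
            List<String> won = new ArrayList<>();
            String sql = "UPDATE queue_message SET popped = true"
                    + " WHERE queue_name = ? AND message_id = ? AND popped = false";
            try (PreparedStatement ps = cn.prepareStatement(sql)) {
                for (String id : candidateIds) {
                    ps.setString(1, queue);
                    ps.setString(2, id);
                    if (ps.executeUpdate() == 1) {
                        won.add(id);
                    }
                }
            }
            return won;
        }
    }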
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.util; - -import java.sql.SQLException; - -/** - * Functional interface for {@link Query} executions with no expected result. - * - * @author mustafa - */ -@FunctionalInterface -public interface ExecuteFunction { - - void apply(Query query) throws SQLException; -} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/LazyToString.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/LazyToString.java deleted file mode 100644 index af19d8de0a..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/LazyToString.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
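ExecuteFunction, defined above, lets DAO code hand the parameter-binding and execution steps to a helper as a lambda while the helper owns the Query lifecycle. A sketch of the likely helper shape, assuming the Query and ExecuteFunction types above are on the classpath; the production helper lives in MySQLBaseDAO, which is not part of this diff, and wraps failures in ApplicationException rather than RuntimeException:

    import java.sql.Connection;
    import java.sql.SQLException;

    import com.fasterxml.jackson.databind.ObjectMapper;

    final class QueryHelper {

        // The helper prepares and closes the Query; the lambda only binds
        // parameters and executes. Sketch only -- error handling simplified.
        static void execute(ObjectMapper mapper, Connection cn, String sql, ExecuteFunction fn) {
            try (Query query = new Query(mapper, cn, sql)) {
                fn.apply(query);
            } catch (SQLException e) {
                throw new RuntimeException("query failed: " + sql, e);
            }
        }
    }

    // Typical call site, as seen throughout the DAOs above:
    //   execute(mapper, tx, "DELETE FROM queue_message WHERE queue_name = ?",
    //           q -> q.addParameter(queueName).executeDelete());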
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.util; - -import java.util.function.Supplier; - -/** Functional class to support the lazy execution of a String result. */ -public class LazyToString { - - private final Supplier supplier; - - /** - * @param supplier Supplier to execute when {@link #toString()} is called. - */ - public LazyToString(Supplier supplier) { - this.supplier = supplier; - } - - @Override - public String toString() { - return supplier.get(); - } -} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/Query.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/Query.java deleted file mode 100644 index 6c89a8872f..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/Query.java +++ /dev/null @@ -1,628 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
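LazyToString, defined above, defers a potentially expensive String computation until something actually formats it, which pairs well with SLF4J's parameterized logging: toString() on an argument is invoked only when the level is enabled and the message is rendered. A small usage sketch; expensiveDump is a hypothetical helper:

    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;

    final class LazyToStringDemo {

        private static final Logger logger = LoggerFactory.getLogger(LazyToStringDemo.class);

        static void log(Object payload) {
            // The supplier runs only if TRACE is enabled and the argument gets
            // rendered; at higher log levels the dump is never computed.
            logger.trace("payload = {}", new LazyToString(() -> expensiveDump(payload)));
        }

        private static String expensiveDump(Object payload) { // hypothetical helper
            return String.valueOf(payload);
        }
    }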
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.util; - -import java.io.IOException; -import java.sql.Connection; -import java.sql.Date; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.commons.lang3.math.NumberUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.core.exception.ApplicationException.Code; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; - -/** - * Represents a {@link PreparedStatement} that is wrapped with convenience methods and utilities. - * - *
This class simulates a parameter building pattern and all {@literal addParameter(*)} methods - * must be called in the proper order of their expected binding sequence. - * - * @author mustafa - */ -public class Query implements AutoCloseable { - private final Logger logger = LoggerFactory.getLogger(getClass()); - - /** The {@link ObjectMapper} instance to use for serializing/deserializing JSON. */ - protected final ObjectMapper objectMapper; - - /** The initial supplied query String that was used to prepare {@link #statement}. */ - private final String rawQuery; - - /** - * Parameter index for the {@code ResultSet#set*(*)} methods, gets incremented every time a - * parameter is added to the {@code PreparedStatement} {@link #statement}. - */ - private final AtomicInteger index = new AtomicInteger(1); - - /** The {@link PreparedStatement} that will be managed and executed by this class. */ - private final PreparedStatement statement; - - public Query(ObjectMapper objectMapper, Connection connection, String query) { - this.rawQuery = query; - this.objectMapper = objectMapper; - - try { - this.statement = connection.prepareStatement(query); - } catch (SQLException ex) { - throw new ApplicationException( - Code.BACKEND_ERROR, - "Cannot prepare statement for query: " + ex.getMessage(), - ex); - } - } - - /** - * Generate a String with {@literal count} number of '?' placeholders for {@link - * PreparedStatement} queries. - * - * @param count The number of '?' chars to generate. - * @return a comma delimited string of {@literal count} '?' binding placeholders. - */ - public static String generateInBindings(int count) { - String[] questions = new String[count]; - for (int i = 0; i < count; i++) { - questions[i] = "?"; - } - - return String.join(", ", questions); - } - - public Query addParameter(final String value) { - return addParameterInternal((ps, idx) -> ps.setString(idx, value)); - } - - public Query addParameter(final int value) { - return addParameterInternal((ps, idx) -> ps.setInt(idx, value)); - } - - public Query addParameter(final boolean value) { - return addParameterInternal(((ps, idx) -> ps.setBoolean(idx, value))); - } - - public Query addParameter(final long value) { - return addParameterInternal((ps, idx) -> ps.setLong(idx, value)); - } - - public Query addParameter(final double value) { - return addParameterInternal((ps, idx) -> ps.setDouble(idx, value)); - } - - public Query addParameter(Date date) { - return addParameterInternal((ps, idx) -> ps.setDate(idx, date)); - } - - public Query addParameter(Timestamp timestamp) { - return addParameterInternal((ps, idx) -> ps.setTimestamp(idx, timestamp)); - } - - /** - * Serializes {@literal value} to a JSON string for persistence. - * - * @param value The value to serialize. - * @return {@literal this} - */ - public Query addJsonParameter(Object value) { - return addParameter(toJson(value)); - } - - /** - * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link java.sql.Date}. - * - * @param date The {@literal java.util.Date} to bind. - * @return {@literal this} - */ - public Query addDateParameter(java.util.Date date) { - return addParameter(new Date(date.getTime())); - } - - /** - * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link - * java.sql.Timestamp}. - * - * @param date The {@literal java.util.Date} to bind. 
- * @return {@literal this} - */ - public Query addTimestampParameter(java.util.Date date) { - return addParameter(new Timestamp(date.getTime())); - } - - /** - * Bind the given epoch millis to the PreparedStatement as a {@link java.sql.Timestamp}. - * - * @param epochMillis The epoch ms to create a new {@literal Timestamp} from. - * @return {@literal this} - */ - public Query addTimestampParameter(long epochMillis) { - return addParameter(new Timestamp(epochMillis)); - } - - /** - * Add a collection of primitive values at once, in the order of the collection. - * - * @param values The values to bind to the prepared statement. - * @return {@literal this} - * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered in the - * collection. - * @see #addParameters(Object...) - */ - public Query addParameters(Collection values) { - return addParameters(values.toArray()); - } - - /** - * Add many primitive values at once. - * - * @param values The values to bind to the prepared statement. - * @return {@literal this} - * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered. - */ - public Query addParameters(Object... values) { - for (Object v : values) { - if (v instanceof String) { - addParameter((String) v); - } else if (v instanceof Integer) { - addParameter((Integer) v); - } else if (v instanceof Long) { - addParameter((Long) v); - } else if (v instanceof Double) { - addParameter((Double) v); - } else if (v instanceof Boolean) { - addParameter((Boolean) v); - } else if (v instanceof Date) { - addParameter((Date) v); - } else if (v instanceof Timestamp) { - addParameter((Timestamp) v); - } else { - throw new IllegalArgumentException( - "Type " - + v.getClass().getName() - + " is not supported by automatic property assignment"); - } - } - - return this; - } - - /** - * Utility method for evaluating the prepared statement as a query to check the existence of a - * record using a numeric count or boolean return value. - * - *
The {@link #rawQuery} provided must result in a {@link Number} or {@link Boolean} result. - * - * @return {@literal true} If a count query returned more than 0 or an exists query returns - * {@literal true}. - * @throws ApplicationException If an unexpected return type cannot be evaluated to a {@code - * Boolean} result. - */ - public boolean exists() { - Object val = executeScalar(); - if (null == val) { - return false; - } - - if (val instanceof Number) { - return convertLong(val) > 0; - } - - if (val instanceof Boolean) { - return (Boolean) val; - } - - if (val instanceof String) { - return convertBoolean(val); - } - - throw new ApplicationException( - Code.BACKEND_ERROR, - "Expected a Numeric or Boolean scalar return value from the query, received " - + val.getClass().getName()); - } - - /** - * Convenience method for executing delete statements. - * - * @return {@literal true} if the statement affected 1 or more rows. - * @see #executeUpdate() - */ - public boolean executeDelete() { - int count = executeUpdate(); - if (count > 1) { - logger.trace("Removed {} row(s) for query {}", count, rawQuery); - } - - return count > 0; - } - - /** - * Convenience method for executing statements that return a single numeric value, typically - * {@literal SELECT COUNT...} style queries. - * - * @return The result of the query as a {@literal long}. - */ - public long executeCount() { - return executeScalar(Long.class); - } - - /** - * @return The result of {@link PreparedStatement#executeUpdate()} - */ - public int executeUpdate() { - try { - - Long start = null; - if (logger.isTraceEnabled()) { - start = System.currentTimeMillis(); - } - - final int val = this.statement.executeUpdate(); - - if (null != start && logger.isTraceEnabled()) { - long end = System.currentTimeMillis(); - logger.trace("[{}ms] {}: {}", (end - start), val, rawQuery); - } - - return val; - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex.getMessage(), ex); - } - } - - /** - * Execute a query from the PreparedStatement and return the ResultSet. - * - *
NOTE: The returned ResultSet must be closed/managed by the calling methods. - * - * @return {@link PreparedStatement#executeQuery()} - * @throws ApplicationException If any SQL errors occur. - */ - public ResultSet executeQuery() { - Long start = null; - if (logger.isTraceEnabled()) { - start = System.currentTimeMillis(); - } - - try { - return this.statement.executeQuery(); - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } finally { - if (null != start && logger.isTraceEnabled()) { - long end = System.currentTimeMillis(); - logger.trace("[{}ms] {}", (end - start), rawQuery); - } - } - } - - /** - * @return The single result of the query as an Object. - */ - public Object executeScalar() { - try (ResultSet rs = executeQuery()) { - if (!rs.next()) { - return null; - } - return rs.getObject(1); - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - /** - * Execute the PreparedStatement and return a single 'primitive' value from the ResultSet. - * - * @param returnType The type to return. - * @param The type parameter to return a List of. - * @return A single result from the execution of the statement, as a type of {@literal - * returnType}. - * @throws ApplicationException {@literal returnType} is unsupported, cannot be cast to from the - * result, or any SQL errors occur. - */ - public V executeScalar(Class returnType) { - try (ResultSet rs = executeQuery()) { - if (!rs.next()) { - Object value = null; - if (Integer.class == returnType) { - value = 0; - } else if (Long.class == returnType) { - value = 0L; - } else if (Boolean.class == returnType) { - value = false; - } - return returnType.cast(value); - } else { - return getScalarFromResultSet(rs, returnType); - } - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - /** - * Execute the PreparedStatement and return a List of 'primitive' values from the ResultSet. - * - * @param returnType The type Class return a List of. - * @param The type parameter to return a List of. - * @return A {@code List}. - * @throws ApplicationException {@literal returnType} is unsupported, cannot be cast to from the - * result, or any SQL errors occur. - */ - public List executeScalarList(Class returnType) { - try (ResultSet rs = executeQuery()) { - List values = new ArrayList<>(); - while (rs.next()) { - values.add(getScalarFromResultSet(rs, returnType)); - } - return values; - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - /** - * Execute the statement and return only the first record from the result set. - * - * @param returnType The Class to return. - * @param The type parameter. - * @return An instance of {@literal } from the result set. - */ - public V executeAndFetchFirst(Class returnType) { - Object o = executeScalar(); - if (null == o) { - return null; - } - return convert(o, returnType); - } - - /** - * Execute the PreparedStatement and return a List of {@literal returnType} values from the - * ResultSet. - * - * @param returnType The type Class return a List of. - * @param The type parameter to return a List of. - * @return A {@code List}. - * @throws ApplicationException {@literal returnType} is unsupported, cannot be cast to from the - * result, or any SQL errors occur. 
- */ - public List executeAndFetch(Class returnType) { - try (ResultSet rs = executeQuery()) { - List list = new ArrayList<>(); - while (rs.next()) { - list.add(convert(rs.getObject(1), returnType)); - } - return list; - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - /** - * Execute the query and pass the {@link ResultSet} to the given handler. - * - * @param handler The {@link ResultSetHandler} to execute. - * @param The return type of this method. - * @return The results of {@link ResultSetHandler#apply(ResultSet)}. - */ - public V executeAndFetch(ResultSetHandler handler) { - try (ResultSet rs = executeQuery()) { - return handler.apply(rs); - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - @Override - public void close() { - try { - if (null != statement && !statement.isClosed()) { - statement.close(); - } - } catch (SQLException ex) { - logger.warn("Error closing prepared statement: {}", ex.getMessage()); - } - } - - protected final Query addParameterInternal(InternalParameterSetter setter) { - int index = getAndIncrementIndex(); - try { - setter.apply(this.statement, index); - return this; - } catch (SQLException ex) { - throw new ApplicationException( - Code.BACKEND_ERROR, "Could not apply bind parameter at index " + index, ex); - } - } - - protected V getScalarFromResultSet(ResultSet rs, Class returnType) throws SQLException { - Object value = null; - - if (Integer.class == returnType) { - value = rs.getInt(1); - } else if (Long.class == returnType) { - value = rs.getLong(1); - } else if (String.class == returnType) { - value = rs.getString(1); - } else if (Boolean.class == returnType) { - value = rs.getBoolean(1); - } else if (Double.class == returnType) { - value = rs.getDouble(1); - } else if (Date.class == returnType) { - value = rs.getDate(1); - } else if (Timestamp.class == returnType) { - value = rs.getTimestamp(1); - } else { - value = rs.getObject(1); - } - - if (null == value) { - throw new NullPointerException( - "Cannot get value from ResultSet of type " + returnType.getName()); - } - - return returnType.cast(value); - } - - protected V convert(Object value, Class returnType) { - if (Boolean.class == returnType) { - return returnType.cast(convertBoolean(value)); - } else if (Integer.class == returnType) { - return returnType.cast(convertInt(value)); - } else if (Long.class == returnType) { - return returnType.cast(convertLong(value)); - } else if (Double.class == returnType) { - return returnType.cast(convertDouble(value)); - } else if (String.class == returnType) { - return returnType.cast(convertString(value)); - } else if (value instanceof String) { - return fromJson((String) value, returnType); - } - - final String vName = value.getClass().getName(); - final String rName = returnType.getName(); - throw new ApplicationException( - Code.BACKEND_ERROR, "Cannot convert type " + vName + " to " + rName); - } - - protected Integer convertInt(Object value) { - if (null == value) { - return null; - } - - if (value instanceof Integer) { - return (Integer) value; - } - - if (value instanceof Number) { - return ((Number) value).intValue(); - } - - return NumberUtils.toInt(value.toString()); - } - - protected Double convertDouble(Object value) { - if (null == value) { - return null; - } - - if (value instanceof Double) { - return (Double) value; - } - - if (value instanceof Number) { - return ((Number) value).doubleValue(); - } - - return NumberUtils.toDouble(value.toString()); - 
} - - protected Long convertLong(Object value) { - if (null == value) { - return null; - } - - if (value instanceof Long) { - return (Long) value; - } - - if (value instanceof Number) { - return ((Number) value).longValue(); - } - return NumberUtils.toLong(value.toString()); - } - - protected String convertString(Object value) { - if (null == value) { - return null; - } - - if (value instanceof String) { - return (String) value; - } - - return value.toString().trim(); - } - - protected Boolean convertBoolean(Object value) { - if (null == value) { - return null; - } - - if (value instanceof Boolean) { - return (Boolean) value; - } - - if (value instanceof Number) { - return ((Number) value).intValue() != 0; - } - - String text = value.toString().trim(); - return "Y".equalsIgnoreCase(text) - || "YES".equalsIgnoreCase(text) - || "TRUE".equalsIgnoreCase(text) - || "T".equalsIgnoreCase(text) - || "1".equalsIgnoreCase(text); - } - - protected String toJson(Object value) { - if (null == value) { - return null; - } - - try { - return objectMapper.writeValueAsString(value); - } catch (JsonProcessingException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - protected V fromJson(String value, Class returnType) { - if (null == value) { - return null; - } - - try { - return objectMapper.readValue(value, returnType); - } catch (IOException ex) { - throw new ApplicationException( - Code.BACKEND_ERROR, - "Could not convert JSON '" + value + "' to " + returnType.getName(), - ex); - } - } - - protected final int getIndex() { - return index.get(); - } - - protected final int getAndIncrementIndex() { - return index.getAndIncrement(); - } - - @FunctionalInterface - private interface InternalParameterSetter { - - void apply(PreparedStatement ps, int idx) throws SQLException; - } -} diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/QueryFunction.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/QueryFunction.java deleted file mode 100644 index 6a07550724..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/QueryFunction.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
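Because the Query class above binds positionally, with every addParameter call consuming the next '?', call order must mirror placeholder order in the SQL. A usage sketch combining that with generateInBindings for an IN clause; the table name and values are illustrative, not part of this module:

    import java.sql.Connection;
    import java.util.Arrays;
    import java.util.List;

    import com.fasterxml.jackson.databind.ObjectMapper;

    final class QueryUsageDemo {

        static List<String> findNames(ObjectMapper mapper, Connection cn) {
            List<String> statuses = Arrays.asList("RUNNING", "PAUSED");
            // generateInBindings(2) yields "?, ?", so the final SQL has exactly
            // one placeholder per bound value, in binding order.
            String sql = "SELECT name FROM hypothetical_table WHERE created_on > ? AND status IN ("
                    + Query.generateInBindings(statuses.size()) + ")";
            try (Query q = new Query(mapper, cn, sql)) {
                return q.addTimestampParameter(System.currentTimeMillis() - 86_400_000L) // one day back
                        .addParameters(statuses) // binds each element in collection order
                        .executeScalarList(String.class);
            }
        }
    }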
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.util; - -import java.sql.SQLException; - -/** - * Functional interface for {@link Query} executions that return results. - * - * @author mustafa - */ -@FunctionalInterface -public interface QueryFunction<R> { - - R apply(Query query) throws SQLException; -}
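`QueryFunction` is the shape a DAO hands to a query-running helper: the helper owns `Query` construction and cleanup, and the lambda only binds parameters and fetches. The sketch below assumes a hypothetical `withQuery` helper written just to show the interface in use (the module's own executor methods are not part of this diff); the SQL is the same un-popped count the tests below run. `TransactionalFunction`, a few files down, plays the same role for `Connection`-scoped work.

```java
import java.sql.Connection;
import java.sql.SQLException;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.conductor.mysql.util.Query;
import com.netflix.conductor.mysql.util.QueryFunction;

class QueryFunctionSketch {

    // Hypothetical helper: builds the Query, applies the lambda, guarantees close().
    static <R> R withQuery(ObjectMapper om, Connection cx, String sql, QueryFunction<R> fn)
            throws SQLException {
        try (Query q = new Query(om, cx, sql)) {
            return fn.apply(q);
        }
    }

    static long countUnpopped(ObjectMapper om, Connection cx, String queueName)
            throws SQLException {
        String sql = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false";
        // The lambda is the QueryFunction: bind, execute, return a result.
        return withQuery(om, cx, sql, q -> q.addParameter(queueName).executeCount());
    }
}
```

diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ResultSetHandler.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ResultSetHandler.java deleted file mode 100644 index 7e7b422b31..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/ResultSetHandler.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *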

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.util; - -import java.sql.ResultSet; -import java.sql.SQLException; - -/** - * Functional interface for {@link Query#executeAndFetch(ResultSetHandler)}. - * - * @author mustafa - */ -@FunctionalInterface -public interface ResultSetHandler<R> { - - R apply(ResultSet resultSet) throws SQLException; -}
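Where `executeAndFetch(Class)` converts a single column, `ResultSetHandler` hands the lambda the raw `ResultSet`, the natural fit when a row carries several values. A short sketch, with the table and column names borrowed from the schema below and the surrounding method purely illustrative:

```java
import java.sql.Connection;
import java.util.HashMap;
import java.util.Map;

import com.fasterxml.jackson.databind.ObjectMapper;
import com.netflix.conductor.mysql.util.Query;

class ResultSetHandlerSketch {

    // Map each queue to its depth by handing the raw ResultSet to a lambda.
    Map<String, Long> queueSizes(ObjectMapper objectMapper, Connection connection) {
        String sql = "SELECT queue_name, COUNT(*) FROM queue_message GROUP BY queue_name";
        try (Query q = new Query(objectMapper, connection, sql)) {
            return q.executeAndFetch(rs -> {
                Map<String, Long> sizes = new HashMap<>();
                while (rs.next()) {
                    sizes.put(rs.getString(1), rs.getLong(2));
                }
                return sizes;
            });
        }
    }
}
```

diff --git a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/TransactionalFunction.java b/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/TransactionalFunction.java deleted file mode 100644 index aad7a1f907..0000000000 --- a/mysql-persistence/src/main/java/com/netflix/conductor/mysql/util/TransactionalFunction.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *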

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.util; - -import java.sql.Connection; -import java.sql.SQLException; - -/** - * Functional interface for operations within a transactional context. - * - * @author mustafa - */ -@FunctionalInterface -public interface TransactionalFunction<R> { - - R apply(Connection tx) throws SQLException; -} diff --git a/mysql-persistence/src/main/resources/db/migration/V1__initial_schema.sql b/mysql-persistence/src/main/resources/db/migration/V1__initial_schema.sql deleted file mode 100644 index 246b55ecd7..0000000000 --- a/mysql-persistence/src/main/resources/db/migration/V1__initial_schema.sql +++ /dev/null @@ -1,172 +0,0 @@ - --- -------------------------------------------------------------------------------------------------------------- --- SCHEMA FOR METADATA DAO --- -------------------------------------------------------------------------------------------------------------- - -CREATE TABLE meta_event_handler ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - name varchar(255) NOT NULL, - event varchar(255) NOT NULL, - active boolean NOT NULL, - json_data mediumtext NOT NULL, - PRIMARY KEY (id), - KEY event_handler_name_index (name), - KEY event_handler_event_index (event) -); - -CREATE TABLE meta_task_def ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - name varchar(255) NOT NULL, - json_data mediumtext NOT NULL, - PRIMARY KEY (id), - UNIQUE KEY unique_task_def_name (name) -); - -CREATE TABLE meta_workflow_def ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - name varchar(255) NOT NULL, - version int(11) NOT NULL, - latest_version int(11) NOT NULL DEFAULT 0, - json_data mediumtext NOT NULL, - PRIMARY KEY (id), - UNIQUE KEY unique_name_version (name,version), - KEY workflow_def_name_index (name) -); - --- -------------------------------------------------------------------------------------------------------------- --- SCHEMA FOR EXECUTION DAO --- -------------------------------------------------------------------------------------------------------------- - -CREATE TABLE event_execution ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - event_handler_name varchar(255) NOT NULL, - event_name varchar(255) NOT NULL, - message_id varchar(255) NOT NULL, - execution_id varchar(255) NOT NULL, - json_data mediumtext NOT NULL, - PRIMARY KEY (id), - UNIQUE KEY unique_event_execution (event_handler_name,event_name,message_id) -); - -CREATE TABLE poll_data ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - queue_name varchar(255) NOT NULL, - domain varchar(255) NOT NULL, - json_data mediumtext NOT NULL, - PRIMARY KEY (id), - UNIQUE KEY unique_poll_data (queue_name,domain), - KEY (queue_name) -); - -CREATE TABLE task_scheduled ( - id int(11) unsigned NOT NULL
AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - workflow_id varchar(255) NOT NULL, - task_key varchar(255) NOT NULL, - task_id varchar(255) NOT NULL, - PRIMARY KEY (id), - UNIQUE KEY unique_workflow_id_task_key (workflow_id,task_key) -); - -CREATE TABLE task_in_progress ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - task_def_name varchar(255) NOT NULL, - task_id varchar(255) NOT NULL, - workflow_id varchar(255) NOT NULL, - in_progress_status boolean NOT NULL DEFAULT false, - PRIMARY KEY (id), - UNIQUE KEY unique_task_def_task_id1 (task_def_name,task_id) -); - -CREATE TABLE task ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - task_id varchar(255) NOT NULL, - json_data mediumtext NOT NULL, - PRIMARY KEY (id), - UNIQUE KEY unique_task_id (task_id) -); - -CREATE TABLE workflow ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - workflow_id varchar(255) NOT NULL, - correlation_id varchar(255), - json_data mediumtext NOT NULL, - PRIMARY KEY (id), - UNIQUE KEY unique_workflow_id (workflow_id) -); - -CREATE TABLE workflow_def_to_workflow ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - workflow_def varchar(255) NOT NULL, - date_str integer NOT NULL, - workflow_id varchar(255) NOT NULL, - PRIMARY KEY (id), - UNIQUE KEY unique_workflow_def_date_str (workflow_def,date_str,workflow_id) -); - -CREATE TABLE workflow_pending ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - workflow_type varchar(255) NOT NULL, - workflow_id varchar(255) NOT NULL, - PRIMARY KEY (id), - UNIQUE KEY unique_workflow_type_workflow_id (workflow_type,workflow_id), - KEY workflow_type_index (workflow_type) -); - -CREATE TABLE workflow_to_task ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - workflow_id varchar(255) NOT NULL, - task_id varchar(255) NOT NULL, - PRIMARY KEY (id), - UNIQUE KEY unique_workflow_to_task_id (workflow_id,task_id), - KEY workflow_id_index (workflow_id) -); - --- -------------------------------------------------------------------------------------------------------------- --- SCHEMA FOR QUEUE DAO --- -------------------------------------------------------------------------------------------------------------- - -CREATE TABLE queue ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - queue_name varchar(255) NOT NULL, - PRIMARY KEY (id), - UNIQUE KEY unique_queue_name (queue_name) -); - -CREATE TABLE queue_message ( - id int(11) unsigned NOT NULL AUTO_INCREMENT, - created_on TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - deliver_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - queue_name varchar(255) NOT NULL, - message_id varchar(255) NOT NULL, - popped boolean DEFAULT false, - offset_time_seconds long, - payload mediumtext, - PRIMARY KEY (id), - UNIQUE KEY unique_queue_name_message_id (queue_name,message_id), - KEY combo_queue_message (queue_name,popped,deliver_on,created_on) -); diff --git 
a/mysql-persistence/src/main/resources/db/migration/V2__queue_message_timestamps.sql b/mysql-persistence/src/main/resources/db/migration/V2__queue_message_timestamps.sql deleted file mode 100644 index ecf7956be9..0000000000 --- a/mysql-persistence/src/main/resources/db/migration/V2__queue_message_timestamps.sql +++ /dev/null @@ -1,2 +0,0 @@ -ALTER TABLE `queue_message` CHANGE `created_on` `created_on` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP; -ALTER TABLE `queue_message` CHANGE `deliver_on` `deliver_on` TIMESTAMP DEFAULT CURRENT_TIMESTAMP; diff --git a/mysql-persistence/src/main/resources/db/migration/V3__queue_add_priority.sql b/mysql-persistence/src/main/resources/db/migration/V3__queue_add_priority.sql deleted file mode 100644 index 2764df8b31..0000000000 --- a/mysql-persistence/src/main/resources/db/migration/V3__queue_add_priority.sql +++ /dev/null @@ -1,17 +0,0 @@ -SET @dbname = DATABASE(); -SET @tablename = "queue_message"; -SET @columnname = "priority"; -SET @preparedStatement = (SELECT IF( - ( - SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS - WHERE - (table_name = @tablename) - AND (table_schema = @dbname) - AND (column_name = @columnname) - ) > 0, - "SELECT 1", - CONCAT("ALTER TABLE ", @tablename, " ADD ", @columnname, " TINYINT DEFAULT 0 AFTER `message_id`") -)); -PREPARE addColumnIfNotExist FROM @preparedStatement; -EXECUTE addColumnIfNotExist; -DEALLOCATE PREPARE addColumnIfNotExist; \ No newline at end of file diff --git a/mysql-persistence/src/main/resources/db/migration/V4__1009_Fix_MySQLExecutionDAO_Index.sql b/mysql-persistence/src/main/resources/db/migration/V4__1009_Fix_MySQLExecutionDAO_Index.sql deleted file mode 100644 index 8787961a84..0000000000 --- a/mysql-persistence/src/main/resources/db/migration/V4__1009_Fix_MySQLExecutionDAO_Index.sql +++ /dev/null @@ -1,14 +0,0 @@ -# Drop the 'unique_event_execution' index if it exists -SET @exist := (SELECT COUNT(INDEX_NAME) - FROM information_schema.STATISTICS - WHERE `TABLE_NAME` = 'event_execution' - AND `INDEX_NAME` = 'unique_event_execution' - AND TABLE_SCHEMA = database()); -SET @sqlstmt := IF(@exist > 0, 'ALTER TABLE `event_execution` DROP INDEX `unique_event_execution`', - 'SELECT ''INFO: Index already exists.'''); -PREPARE stmt FROM @sqlstmt; -EXECUTE stmt; - -# Create the 'unique_event_execution' index with execution_id column instead of 'message_id' so events can be executed multiple times. -ALTER TABLE `event_execution` - ADD CONSTRAINT `unique_event_execution` UNIQUE (event_handler_name, event_name, execution_id); diff --git a/mysql-persistence/src/main/resources/db/migration/V5__correlation_id_index.sql b/mysql-persistence/src/main/resources/db/migration/V5__correlation_id_index.sql deleted file mode 100644 index 2f13789f37..0000000000 --- a/mysql-persistence/src/main/resources/db/migration/V5__correlation_id_index.sql +++ /dev/null @@ -1,13 +0,0 @@ -# Drop the 'workflow_corr_id_index' index if it exists -SET @exist := (SELECT COUNT(INDEX_NAME) - FROM information_schema.STATISTICS - WHERE `TABLE_NAME` = 'workflow' - AND `INDEX_NAME` = 'workflow_corr_id_index' - AND TABLE_SCHEMA = database()); -SET @sqlstmt := IF(@exist > 0, 'ALTER TABLE `workflow` DROP INDEX `workflow_corr_id_index`', - 'SELECT ''INFO: Index already exists.'''); -PREPARE stmt FROM @sqlstmt; -EXECUTE stmt; - -# Create the 'workflow_corr_id_index' index with correlation_id column because correlation_id queries are slow in large databases. 
-CREATE INDEX workflow_corr_id_index ON workflow (correlation_id); diff --git a/mysql-persistence/src/main/resources/db/migration/V6__new_qm_index_with_priority.sql b/mysql-persistence/src/main/resources/db/migration/V6__new_qm_index_with_priority.sql deleted file mode 100644 index de591f972b..0000000000 --- a/mysql-persistence/src/main/resources/db/migration/V6__new_qm_index_with_priority.sql +++ /dev/null @@ -1,13 +0,0 @@ -# Drop the 'combo_queue_message' index if it exists -SET @exist := (SELECT COUNT(INDEX_NAME) - FROM information_schema.STATISTICS - WHERE `TABLE_NAME` = 'queue_message' - AND `INDEX_NAME` = 'combo_queue_message' - AND TABLE_SCHEMA = database()); -SET @sqlstmt := IF(@exist > 0, 'ALTER TABLE `queue_message` DROP INDEX `combo_queue_message`', - 'SELECT ''INFO: Index already exists.'''); -PREPARE stmt FROM @sqlstmt; -EXECUTE stmt; - -# Re-create the 'combo_queue_message' index to add priority column because queries that order by priority are slow in large databases. -CREATE INDEX combo_queue_message ON queue_message (queue_name,priority,popped,deliver_on,created_on); diff --git a/mysql-persistence/src/main/resources/db/migration/V7__new_queue_message_pk.sql b/mysql-persistence/src/main/resources/db/migration/V7__new_queue_message_pk.sql deleted file mode 100644 index afad02024e..0000000000 --- a/mysql-persistence/src/main/resources/db/migration/V7__new_queue_message_pk.sql +++ /dev/null @@ -1,24 +0,0 @@ -# no longer need separate index if pk is queue_name, message_id -SET @idx_exists := (SELECT COUNT(INDEX_NAME) - FROM information_schema.STATISTICS - WHERE `TABLE_NAME` = 'queue_message' - AND `INDEX_NAME` = 'unique_queue_name_message_id' - AND TABLE_SCHEMA = database()); -SET @idxstmt := IF(@idx_exists > 0, 'ALTER TABLE `queue_message` DROP INDEX `unique_queue_name_message_id`', - 'SELECT ''INFO: Index unique_queue_name_message_id does not exist.'''); -PREPARE stmt1 FROM @idxstmt; -EXECUTE stmt1; - -# remove id column -set @col_exists := (SELECT COUNT(*) - FROM information_schema.COLUMNS - WHERE `TABLE_NAME` = 'queue_message' - AND `COLUMN_NAME` = 'id' - AND TABLE_SCHEMA = database()); -SET @colstmt := IF(@col_exists > 0, 'ALTER TABLE `queue_message` DROP COLUMN `id`', - 'SELECT ''INFO: Column id does not exist.''') ; -PREPARE stmt2 from @colstmt; -EXECUTE stmt2; - -# set primary key to queue_name, message_id -ALTER TABLE queue_message ADD PRIMARY KEY (queue_name, message_id); diff --git a/mysql-persistence/src/main/resources/db/migration/V8__update_pk.sql b/mysql-persistence/src/main/resources/db/migration/V8__update_pk.sql deleted file mode 100644 index f1ed4f7ad7..0000000000 --- a/mysql-persistence/src/main/resources/db/migration/V8__update_pk.sql +++ /dev/null @@ -1,103 +0,0 @@ -DELIMITER $$ -DROP PROCEDURE IF EXISTS `DropIndexIfExists`$$ -CREATE PROCEDURE `DropIndexIfExists`(IN tableName VARCHAR(128), IN indexName VARCHAR(128)) -BEGIN - - DECLARE index_exists INT DEFAULT 0; - - SELECT COUNT(1) INTO index_exists - FROM INFORMATION_SCHEMA.STATISTICS - WHERE TABLE_NAME = tableName - AND INDEX_NAME = indexName - AND TABLE_SCHEMA = database(); - - IF index_exists > 0 THEN - - SELECT CONCAT('INFO: Dropping Index ', indexName, ' on table ', tableName); - SET @stmt = CONCAT('ALTER TABLE ', tableName, ' DROP INDEX ', indexName); - PREPARE st FROM @stmt; - EXECUTE st; - DEALLOCATE PREPARE st; - - ELSE - SELECT CONCAT('INFO: Index ', indexName, ' does not exists on table ', tableName); - END IF; - -END$$ - -DROP PROCEDURE IF EXISTS `FixPkIfNeeded`$$ -CREATE PROCEDURE 
`FixPkIfNeeded`(IN tableName VARCHAR(128), IN columns VARCHAR(128)) -BEGIN - - DECLARE col_exists INT DEFAULT 0; - - SELECT COUNT(1) INTO col_exists - FROM INFORMATION_SCHEMA.COLUMNS - WHERE TABLE_NAME = tableName - AND COLUMN_NAME = 'id' - AND TABLE_SCHEMA = database(); - - IF col_exists > 0 THEN - - SELECT CONCAT('INFO: Updating PK on table ', tableName); - - SET @stmt = CONCAT('ALTER TABLE ', tableName, ' MODIFY id INT'); - PREPARE st FROM @stmt; - EXECUTE st; - DEALLOCATE PREPARE st; - - SET @stmt = CONCAT('ALTER TABLE ', tableName, ' DROP PRIMARY KEY, ADD PRIMARY KEY (', columns, ')'); - PREPARE st FROM @stmt; - EXECUTE st; - DEALLOCATE PREPARE st; - - SET @stmt = CONCAT('ALTER TABLE ', tableName, ' DROP COLUMN id'); - PREPARE st FROM @stmt; - EXECUTE st; - DEALLOCATE PREPARE st; - - ELSE - SELECT CONCAT('INFO: Column id does not exists on table ', tableName); - END IF; - -END$$ -DELIMITER ; - -CALL DropIndexIfExists('queue_message', 'unique_queue_name_message_id'); -CALL FixPkIfNeeded('queue_message','queue_name, message_id'); - -CALL DropIndexIfExists('queue', 'unique_queue_name'); -CALL FixPkIfNeeded('queue','queue_name'); - -CALL DropIndexIfExists('workflow_to_task', 'unique_workflow_to_task_id'); -CALL FixPkIfNeeded('workflow_to_task', 'workflow_id, task_id'); - -CALL DropIndexIfExists('workflow_pending', 'unique_workflow_type_workflow_id'); -CALL FixPkIfNeeded('workflow_pending', 'workflow_type, workflow_id'); - -CALL DropIndexIfExists('workflow_def_to_workflow', 'unique_workflow_def_date_str'); -CALL FixPkIfNeeded('workflow_def_to_workflow', 'workflow_def, date_str, workflow_id'); - -CALL DropIndexIfExists('workflow', 'unique_workflow_id'); -CALL FixPkIfNeeded('workflow', 'workflow_id'); - -CALL DropIndexIfExists('task', 'unique_task_id'); -CALL FixPkIfNeeded('task', 'task_id'); - -CALL DropIndexIfExists('task_in_progress', 'unique_task_def_task_id1'); -CALL FixPkIfNeeded('task_in_progress', 'task_def_name, task_id'); - -CALL DropIndexIfExists('task_scheduled', 'unique_workflow_id_task_key'); -CALL FixPkIfNeeded('task_scheduled', 'workflow_id, task_key'); - -CALL DropIndexIfExists('poll_data', 'unique_poll_data'); -CALL FixPkIfNeeded('poll_data','queue_name, domain'); - -CALL DropIndexIfExists('event_execution', 'unique_event_execution'); -CALL FixPkIfNeeded('event_execution', 'event_handler_name, event_name, execution_id'); - -CALL DropIndexIfExists('meta_workflow_def', 'unique_name_version'); -CALL FixPkIfNeeded('meta_workflow_def', 'name, version'); - -CALL DropIndexIfExists('meta_task_def', 'unique_task_def_name'); -CALL FixPkIfNeeded('meta_task_def','name'); \ No newline at end of file
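The migrations above share one idiom: consult INFORMATION_SCHEMA first, then run the DDL only if needed, so each script stays safe to re-run; V8 simply folds that guard into stored procedures. The same check is straightforward from plain JDBC if ad-hoc tooling ever needs it; a sketch (class and method names are illustrative, the SQL mirrors the guard in the V4-V8 scripts):

```java
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;

class IdempotentDdlSketch {

    // Mirrors the migrations' guard: consult INFORMATION_SCHEMA before
    // touching an index so the operation can be repeated safely.
    static boolean indexExists(Connection cx, String table, String index) throws SQLException {
        String sql =
                "SELECT COUNT(*) FROM information_schema.STATISTICS "
                        + "WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = ? AND INDEX_NAME = ?";
        try (PreparedStatement ps = cx.prepareStatement(sql)) {
            ps.setString(1, table);
            ps.setString(2, index);
            try (ResultSet rs = ps.executeQuery()) {
                rs.next();
                return rs.getInt(1) > 0;
            }
        }
    }
}
```

diff --git a/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAOTest.java b/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAOTest.java deleted file mode 100644 index da0769e421..0000000000 --- a/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLExecutionDAOTest.java +++ /dev/null @@ -1,81 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *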

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.dao; - -import java.util.List; - -import org.flywaydb.core.Flyway; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.conductor.common.config.TestObjectMapperConfiguration; -import com.netflix.conductor.common.metadata.workflow.WorkflowDef; -import com.netflix.conductor.dao.ExecutionDAO; -import com.netflix.conductor.dao.ExecutionDAOTest; -import com.netflix.conductor.model.WorkflowModel; -import com.netflix.conductor.mysql.config.MySQLConfiguration; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -@ContextConfiguration( - classes = { - TestObjectMapperConfiguration.class, - MySQLConfiguration.class, - FlywayAutoConfiguration.class - }) -@RunWith(SpringRunner.class) -@SpringBootTest -public class MySQLExecutionDAOTest extends ExecutionDAOTest { - - @Autowired private MySQLExecutionDAO executionDAO; - - @Autowired Flyway flyway; - - // clean the database between tests. - @Before - public void before() { - flyway.clean(); - flyway.migrate(); - } - - @Test - public void testPendingByCorrelationId() { - - WorkflowDef def = new WorkflowDef(); - def.setName("pending_count_correlation_jtest"); - - WorkflowModel workflow = createTestWorkflow(); - workflow.setWorkflowDefinition(def); - - generateWorkflows(workflow, 10); - - List<WorkflowModel> bycorrelationId = - getExecutionDAO() - .getWorkflowsByCorrelationId( - "pending_count_correlation_jtest", "corr001", true); - assertNotNull(bycorrelationId); - assertEquals(10, bycorrelationId.size()); - } - - @Override - public ExecutionDAO getExecutionDAO() { - return executionDAO; - } -}
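The `@Before` hook above rebuilds the schema with Flyway before every test, and the test `application.properties` further down points the datasource at a Testcontainers-managed MySQL (`jdbc:tc:mysql:///conductor`), so each run starts from a disposable, clean database. Roughly the same reset outside Spring might look like this sketch (the datasource wiring is assumed, not shown in this diff):

```java
import javax.sql.DataSource;

import org.flywaydb.core.Flyway;

class FlywayResetSketch {

    // Recreate the schema from the V1..V8 migrations bundled with the module.
    static void reset(DataSource dataSource) {
        Flyway flyway = Flyway.configure()
                .dataSource(dataSource)
                .locations("classpath:db/migration")
                .load();
        flyway.clean();   // drop all objects in the schema
        flyway.migrate(); // reapply every migration from scratch
    }
}
```

diff --git a/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAOTest.java b/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAOTest.java deleted file mode 100644 index 34250b2c1c..0000000000 --- a/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLMetadataDAOTest.java +++ /dev/null @@ -1,286 +0,0 @@ -/* - * Copyright 2021 Netflix, Inc. - *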

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.dao; - -import java.util.Arrays; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import java.util.stream.Collectors; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.flywaydb.core.Flyway; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.conductor.common.config.TestObjectMapperConfiguration; -import com.netflix.conductor.common.metadata.events.EventHandler; -import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.common.metadata.workflow.WorkflowDef; -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.mysql.config.MySQLConfiguration; - -import static com.netflix.conductor.core.exception.ApplicationException.Code.CONFLICT; -import static com.netflix.conductor.core.exception.ApplicationException.Code.NOT_FOUND; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; - -@ContextConfiguration( - classes = { - TestObjectMapperConfiguration.class, - MySQLConfiguration.class, - FlywayAutoConfiguration.class - }) -@RunWith(SpringRunner.class) -@SpringBootTest -public class MySQLMetadataDAOTest { - - @Autowired private MySQLMetadataDAO metadataDAO; - - @Autowired Flyway flyway; - - // clean the database between tests. 
- @Before - public void before() { - flyway.clean(); - flyway.migrate(); - } - - @Test - public void testDuplicateWorkflowDef() { - - WorkflowDef def = new WorkflowDef(); - def.setName("testDuplicate"); - def.setVersion(1); - - metadataDAO.createWorkflowDef(def); - - ApplicationException applicationException = - assertThrows(ApplicationException.class, () -> metadataDAO.createWorkflowDef(def)); - assertEquals( - "Workflow with testDuplicate.1 already exists!", applicationException.getMessage()); - assertEquals(CONFLICT, applicationException.getCode()); - } - - @Test - public void testRemoveNotExistingWorkflowDef() { - ApplicationException applicationException = - assertThrows( - ApplicationException.class, () -> metadataDAO.removeWorkflowDef("test", 1)); - assertEquals( - "No such workflow definition: test version: 1", applicationException.getMessage()); - assertEquals(NOT_FOUND, applicationException.getCode()); - } - - @Test - public void testWorkflowDefOperations() { - WorkflowDef def = new WorkflowDef(); - def.setName("test"); - def.setVersion(1); - def.setDescription("description"); - def.setCreatedBy("unit_test"); - def.setCreateTime(1L); - def.setOwnerApp("ownerApp"); - def.setUpdatedBy("unit_test2"); - def.setUpdateTime(2L); - - metadataDAO.createWorkflowDef(def); - - List<WorkflowDef> all = metadataDAO.getAllWorkflowDefs(); - assertNotNull(all); - assertEquals(1, all.size()); - assertEquals("test", all.get(0).getName()); - assertEquals(1, all.get(0).getVersion()); - - WorkflowDef found = metadataDAO.getWorkflowDef("test", 1).get(); - assertTrue(EqualsBuilder.reflectionEquals(def, found)); - - def.setVersion(3); - metadataDAO.createWorkflowDef(def); - - all = metadataDAO.getAllWorkflowDefs(); - assertNotNull(all); - assertEquals(2, all.size()); - assertEquals("test", all.get(0).getName()); - assertEquals(1, all.get(0).getVersion()); - - found = metadataDAO.getLatestWorkflowDef(def.getName()).get(); - assertEquals(def.getName(), found.getName()); - assertEquals(def.getVersion(), found.getVersion()); - assertEquals(3, found.getVersion()); - - all = metadataDAO.getAllLatest(); - assertNotNull(all); - assertEquals(1, all.size()); - assertEquals("test", all.get(0).getName()); - assertEquals(3, all.get(0).getVersion()); - - all = metadataDAO.getAllVersions(def.getName()); - assertNotNull(all); - assertEquals(2, all.size()); - assertEquals("test", all.get(0).getName()); - assertEquals("test", all.get(1).getName()); - assertEquals(1, all.get(0).getVersion()); - assertEquals(3, all.get(1).getVersion()); - - def.setDescription("updated"); - metadataDAO.updateWorkflowDef(def); - found = metadataDAO.getWorkflowDef(def.getName(), def.getVersion()).get(); - assertEquals(def.getDescription(), found.getDescription()); - - List<String> allnames = metadataDAO.findAll(); - assertNotNull(allnames); - assertEquals(1, allnames.size()); - assertEquals(def.getName(), allnames.get(0)); - - def.setVersion(2); - metadataDAO.createWorkflowDef(def); - - found = metadataDAO.getLatestWorkflowDef(def.getName()).get(); - assertEquals(def.getName(), found.getName()); - assertEquals(3, found.getVersion()); - - metadataDAO.removeWorkflowDef("test", 3); - Optional<WorkflowDef> deleted = metadataDAO.getWorkflowDef("test", 3); - assertFalse(deleted.isPresent()); - - found = metadataDAO.getLatestWorkflowDef(def.getName()).get(); - assertEquals(def.getName(), found.getName()); - assertEquals(2, found.getVersion()); - - metadataDAO.removeWorkflowDef("test", 1); - deleted = metadataDAO.getWorkflowDef("test", 1); - assertFalse(deleted.isPresent()); -
- found = metadataDAO.getLatestWorkflowDef(def.getName()).get(); - assertEquals(def.getName(), found.getName()); - assertEquals(2, found.getVersion()); - } - - @Test - public void testTaskDefOperations() { - TaskDef def = new TaskDef("taskA"); - def.setDescription("description"); - def.setCreatedBy("unit_test"); - def.setCreateTime(1L); - def.setInputKeys(Arrays.asList("a", "b", "c")); - def.setOutputKeys(Arrays.asList("01", "o2")); - def.setOwnerApp("ownerApp"); - def.setRetryCount(3); - def.setRetryDelaySeconds(100); - def.setRetryLogic(TaskDef.RetryLogic.FIXED); - def.setTimeoutPolicy(TaskDef.TimeoutPolicy.ALERT_ONLY); - def.setUpdatedBy("unit_test2"); - def.setUpdateTime(2L); - - metadataDAO.createTaskDef(def); - - TaskDef found = metadataDAO.getTaskDef(def.getName()); - assertTrue(EqualsBuilder.reflectionEquals(def, found)); - - def.setDescription("updated description"); - metadataDAO.updateTaskDef(def); - found = metadataDAO.getTaskDef(def.getName()); - assertTrue(EqualsBuilder.reflectionEquals(def, found)); - assertEquals("updated description", found.getDescription()); - - for (int i = 0; i < 9; i++) { - TaskDef tdf = new TaskDef("taskA" + i); - metadataDAO.createTaskDef(tdf); - } - - List<TaskDef> all = metadataDAO.getAllTaskDefs(); - assertNotNull(all); - assertEquals(10, all.size()); - Set<String> allnames = all.stream().map(TaskDef::getName).collect(Collectors.toSet()); - assertEquals(10, allnames.size()); - List<String> sorted = allnames.stream().sorted().collect(Collectors.toList()); - assertEquals(def.getName(), sorted.get(0)); - - for (int i = 0; i < 9; i++) { - assertEquals(def.getName() + i, sorted.get(i + 1)); - } - - for (int i = 0; i < 9; i++) { - metadataDAO.removeTaskDef(def.getName() + i); - } - all = metadataDAO.getAllTaskDefs(); - assertNotNull(all); - assertEquals(1, all.size()); - assertEquals(def.getName(), all.get(0).getName()); - } - - @Test - public void testRemoveNotExistingTaskDef() { - ApplicationException applicationException = - assertThrows( - ApplicationException.class, - () -> metadataDAO.removeTaskDef("test" + UUID.randomUUID().toString())); - assertEquals("No such task definition", applicationException.getMessage()); - assertEquals(NOT_FOUND, applicationException.getCode()); - } - - @Test - public void testEventHandlers() { - String event1 = "SQS::arn:account090:sqstest1"; - String event2 = "SQS::arn:account090:sqstest2"; - - EventHandler eventHandler = new EventHandler(); - eventHandler.setName(UUID.randomUUID().toString()); - eventHandler.setActive(false); - EventHandler.Action action = new EventHandler.Action(); - action.setAction(EventHandler.Action.Type.start_workflow); - action.setStart_workflow(new EventHandler.StartWorkflow()); - action.getStart_workflow().setName("workflow_x"); - eventHandler.getActions().add(action); - eventHandler.setEvent(event1); - - metadataDAO.addEventHandler(eventHandler); - List<EventHandler> all = metadataDAO.getAllEventHandlers(); - assertNotNull(all); - assertEquals(1, all.size()); - assertEquals(eventHandler.getName(), all.get(0).getName()); - assertEquals(eventHandler.getEvent(), all.get(0).getEvent()); - - List<EventHandler> byEvents = metadataDAO.getEventHandlersForEvent(event1, true); - assertNotNull(byEvents); - assertEquals(0, byEvents.size()); // event is marked as in-active - - eventHandler.setActive(true); - eventHandler.setEvent(event2); - metadataDAO.updateEventHandler(eventHandler); - - all = metadataDAO.getAllEventHandlers(); - assertNotNull(all); - assertEquals(1, all.size()); - - byEvents = metadataDAO.getEventHandlersForEvent(event1, true); -
assertNotNull(byEvents); - assertEquals(0, byEvents.size()); - - byEvents = metadataDAO.getEventHandlersForEvent(event2, true); - assertNotNull(byEvents); - assertEquals(1, byEvents.size()); - } -} diff --git a/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLQueueDAOTest.java b/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLQueueDAOTest.java deleted file mode 100644 index 93b32de69c..0000000000 --- a/mysql-persistence/src/test/java/com/netflix/conductor/mysql/dao/MySQLQueueDAOTest.java +++ /dev/null @@ -1,385 +0,0 @@ -/* - * Copyright 2021 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.mysql.dao; - -import java.sql.Connection; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import javax.sql.DataSource; - -import org.flywaydb.core.Flyway; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.conductor.common.config.TestObjectMapperConfiguration; -import com.netflix.conductor.core.events.queue.Message; -import com.netflix.conductor.mysql.config.MySQLConfiguration; -import com.netflix.conductor.mysql.util.Query; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableList; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -@ContextConfiguration( - classes = { - TestObjectMapperConfiguration.class, - MySQLConfiguration.class, - FlywayAutoConfiguration.class - }) -@RunWith(SpringRunner.class) -@SpringBootTest -public class MySQLQueueDAOTest { - - private static final Logger LOGGER = LoggerFactory.getLogger(MySQLQueueDAOTest.class); - - @Autowired private MySQLQueueDAO queueDAO; - - @Autowired private ObjectMapper objectMapper; - - @Qualifier("dataSource") - @Autowired - private DataSource dataSource; - - @Autowired Flyway flyway; - - // clean the database between tests. 
- @Before - public void before() { - flyway.clean(); - flyway.migrate(); - } - - @Test - public void complexQueueTest() { - String queueName = "TestQueue"; - long offsetTimeInSecond = 0; - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - queueDAO.push(queueName, messageId, offsetTimeInSecond); - } - int size = queueDAO.getSize(queueName); - assertEquals(10, size); - Map<String, Long> details = queueDAO.queuesDetail(); - assertEquals(1, details.size()); - assertEquals(10L, details.get(queueName).longValue()); - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond); - } - - List<String> popped = queueDAO.pop(queueName, 10, 100); - assertNotNull(popped); - assertEquals(10, popped.size()); - - Map<String, Map<String, Map<String, Long>>> verbose = queueDAO.queuesDetailVerbose(); - assertEquals(1, verbose.size()); - long shardSize = verbose.get(queueName).get("a").get("size"); - long unackedSize = verbose.get(queueName).get("a").get("uacked"); - assertEquals(0, shardSize); - assertEquals(10, unackedSize); - - popped.forEach(messageId -> queueDAO.ack(queueName, messageId)); - - verbose = queueDAO.queuesDetailVerbose(); - assertEquals(1, verbose.size()); - shardSize = verbose.get(queueName).get("a").get("size"); - unackedSize = verbose.get(queueName).get("a").get("uacked"); - assertEquals(0, shardSize); - assertEquals(0, unackedSize); - - popped = queueDAO.pop(queueName, 10, 100); - assertNotNull(popped); - assertEquals(0, popped.size()); - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond); - } - size = queueDAO.getSize(queueName); - assertEquals(10, size); - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - assertTrue(queueDAO.containsMessage(queueName, messageId)); - queueDAO.remove(queueName, messageId); - } - - size = queueDAO.getSize(queueName); - assertEquals(0, size); - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond); - } - queueDAO.flush(queueName); - size = queueDAO.getSize(queueName); - assertEquals(0, size); - } - - /** Test fix for https://github.com/Netflix/conductor/issues/1892 */ - @Test - public void containsMessageTest() { - String queueName = "TestQueue"; - long offsetTimeInSecond = 0; - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - queueDAO.push(queueName, messageId, offsetTimeInSecond); - } - int size = queueDAO.getSize(queueName); - assertEquals(10, size); - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - assertTrue(queueDAO.containsMessage(queueName, messageId)); - queueDAO.remove(queueName, messageId); - } - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - assertFalse(queueDAO.containsMessage(queueName, messageId)); - } - } - - /** - * Test fix for https://github.com/Netflix/conductor/issues/399 - * - * @since 1.8.2-rc5 - */ - @Test - public void pollMessagesTest() { - final List<Message> messages = new ArrayList<>(); - final String queueName = "issue399_testQueue"; - final int totalSize = 10; - - for (int i = 0; i < totalSize; i++) { - String payload = "{\"id\": " + i + ", \"msg\":\"test " + i + "\"}"; - Message m = new Message("testmsg-" + i, payload, ""); - if (i % 2 == 0) { - // Set priority on messages with an even id - m.setPriority(99 - i); - } - messages.add(m); - } - - // Populate the queue with our test message batch - queueDAO.push(queueName, ImmutableList.copyOf(messages)); - - // Assert that all messages were persisted and no extras are in there - assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName)); - - final int firstPollSize = 3; - List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 10_000); - assertNotNull("First poll was null", firstPoll); - assertFalse("First poll was empty", firstPoll.isEmpty()); - assertEquals("First poll size mismatch", firstPollSize, firstPoll.size()); - - final int secondPollSize = 4; - List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize, 10_000); - assertNotNull("Second poll was null", secondPoll); - assertFalse("Second poll was empty", secondPoll.isEmpty()); - assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size()); - - // Assert that the total queue size hasn't changed - assertEquals( - "Total queue size should have remained the same", - totalSize, - queueDAO.getSize(queueName)); - - // Assert that our un-popped messages match our expected size - final long expectedSize = totalSize - firstPollSize - secondPollSize; - try (Connection c = dataSource.getConnection()) { - String UNPOPPED = - "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false"; - try (Query q = new Query(objectMapper, c, UNPOPPED)) { - long count = q.addParameter(queueName).executeCount(); - assertEquals("Remaining queue size mismatch", expectedSize, count); - } - } catch (Exception ex) { - fail(ex.getMessage()); - } - } - - /** - * Test fix for https://github.com/Netflix/conductor/issues/448 - * - * @since 1.8.2-rc5 - */ - @Test - public void pollDeferredMessagesTest() throws InterruptedException { - final List<Message> messages = new ArrayList<>(); - final String queueName = "issue448_testQueue"; - final int totalSize = 10; - - for (int i = 0; i < totalSize; i++) { - int offset = 0; - if (i < 5) { - offset = 0; - } else if (i == 6 || i == 7) { - // Purposefully skipping id:5 to test out of order deliveries - // Set id:6 and id:7 for a 5s delay to be picked up in the second polling batch - offset = 5; - } else { - // Set all other queue messages to have enough of a delay that they won't - // accidentally be picked up.
- offset = 10_000 + i; - } - - String payload = "{\"id\": " + i + ",\"offset_time_seconds\":" + offset + "}"; - Message m = new Message("testmsg-" + i, payload, ""); - messages.add(m); - queueDAO.push(queueName, "testmsg-" + i, offset); - } - - // Assert that all messages were persisted and no extras are in there - assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName)); - - final int firstPollSize = 4; - List<Message> firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 100); - assertNotNull("First poll was null", firstPoll); - assertFalse("First poll was empty", firstPoll.isEmpty()); - assertEquals("First poll size mismatch", firstPollSize, firstPoll.size()); - - List<String> firstPollMessageIds = - messages.stream() - .map(Message::getId) - .collect(Collectors.toList()) - .subList(0, firstPollSize + 1); - - for (int i = 0; i < firstPollSize; i++) { - String actual = firstPoll.get(i).getId(); - assertTrue("Unexpected Id: " + actual, firstPollMessageIds.contains(actual)); - } - - final int secondPollSize = 3; - - // Sleep a bit to get the next batch of messages - LOGGER.debug("Sleeping for second poll..."); - Thread.sleep(5_000); - - // Poll for many more messages than expected - List<Message> secondPoll = queueDAO.pollMessages(queueName, secondPollSize + 10, 100); - assertNotNull("Second poll was null", secondPoll); - assertFalse("Second poll was empty", secondPoll.isEmpty()); - assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size()); - - List<String> expectedIds = Arrays.asList("testmsg-4", "testmsg-6", "testmsg-7"); - for (int i = 0; i < secondPollSize; i++) { - String actual = secondPoll.get(i).getId(); - assertTrue("Unexpected Id: " + actual, expectedIds.contains(actual)); - } - - // Assert that the total queue size hasn't changed - assertEquals( - "Total queue size should have remained the same", - totalSize, - queueDAO.getSize(queueName)); - - // Assert that our un-popped messages match our expected size - final long expectedSize = totalSize - firstPollSize - secondPollSize; - try (Connection c = dataSource.getConnection()) { - String UNPOPPED = - "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false"; - try (Query q = new Query(objectMapper, c, UNPOPPED)) { - long count = q.addParameter(queueName).executeCount(); - assertEquals("Remaining queue size mismatch", expectedSize, count); - } - } catch (Exception ex) { - fail(ex.getMessage()); - } - } - - @Test - public void processUnacksTest() { - final String queueName = "process_unacks_test"; - // Count of messages in the queue(s) - final int count = 10; - // Number of messages to process acks for - final int unackedCount = 4; - // A secondary queue to make sure we don't accidentally process other queues - final String otherQueueName = "process_unacks_test_other_queue"; - - // Create testing queue with some messages (but not all) that will be popped/acked.
- for (int i = 0; i < count; i++) { - int offset = 0; - if (i >= unackedCount) { - offset = 1_000_000; - } - - queueDAO.push(queueName, "unack-" + i, offset); - } - - // Create a second queue to make sure that unacks don't occur for it - for (int i = 0; i < count; i++) { - queueDAO.push(otherQueueName, "other-" + i, 0); - } - - // Poll for first batch of messages (should be equal to unackedCount) - List<Message> polled = queueDAO.pollMessages(queueName, 100, 10_000); - assertNotNull(polled); - assertFalse(polled.isEmpty()); - assertEquals(unackedCount, polled.size()); - - // Poll messages from the other queue so we know they don't get unacked later - queueDAO.pollMessages(otherQueueName, 100, 10_000); - - // Ack one of the polled messages - assertTrue(queueDAO.ack(queueName, "unack-1")); - - // Should have one less un-acked popped message in the queue - Long uacked = queueDAO.queuesDetailVerbose().get(queueName).get("a").get("uacked"); - assertNotNull(uacked); - assertEquals(uacked.longValue(), unackedCount - 1); - - // Process unacks - queueDAO.processUnacks(queueName); - - // Check uacks for both queues after processing - Map<String, Map<String, Map<String, Long>>> details = queueDAO.queuesDetailVerbose(); - uacked = details.get(queueName).get("a").get("uacked"); - assertNotNull(uacked); - assertEquals( - "The messages that were polled should be unacked still", - uacked.longValue(), - unackedCount - 1); - - Long otherUacked = details.get(otherQueueName).get("a").get("uacked"); - assertNotNull(otherUacked); - assertEquals( - "Other queue should have all unacked messages", otherUacked.longValue(), count); - - Long size = queueDAO.queuesDetail().get(queueName); - assertNotNull(size); - assertEquals(size.longValue(), count - unackedCount); - } -} diff --git a/mysql-persistence/src/test/resources/application.properties b/mysql-persistence/src/test/resources/application.properties deleted file mode 100644 index 1abda5a9cc..0000000000 --- a/mysql-persistence/src/test/resources/application.properties +++ /dev/null @@ -1,6 +0,0 @@ -conductor.db.type=mysql -spring.datasource.url=jdbc:tc:mysql:///conductor -spring.datasource.username=root -spring.datasource.password=root -spring.datasource.hikari.maximum-pool-size=8 -spring.datasource.hikari.auto-commit=false diff --git a/postgres-external-storage/README.md b/postgres-external-storage/README.md deleted file mode 100644 index 341d545c3e..0000000000 --- a/postgres-external-storage/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# PostgreSQL External Storage Module - -This module uses PostgreSQL to store and retrieve workflow/task input/output payloads that -exceed the thresholds defined in the properties named `conductor.[workflow|task].[input|output].payload.threshold.kb`. - -## Configuration - -### Usage - -See the
Documentation [External Payload Storage](https://netflix.github.io/conductor/externalpayloadstorage/#postgresql-storage) - -### Example - -```properties -conductor.external-payload-storage.type=postgres -conductor.external-payload-storage.postgres.conductor-url=http://localhost:8080 -conductor.external-payload-storage.postgres.url=jdbc:postgresql://postgresql:5432/conductor?charset=utf8&parseTime=true&interpolateParams=true -conductor.external-payload-storage.postgres.username=postgres -conductor.external-payload-storage.postgres.password=postgres -conductor.external-payload-storage.postgres.max-data-rows=1000000 -conductor.external-payload-storage.postgres.max-data-days=0 -conductor.external-payload-storage.postgres.max-data-months=0 -conductor.external-payload-storage.postgres.max-data-years=1 -``` \ No newline at end of file diff --git a/postgres-external-storage/dependencies.lock b/postgres-external-storage/dependencies.lock deleted file mode 100644 index 97578c6a75..0000000000 --- a/postgres-external-storage/dependencies.lock +++ /dev/null @@ -1,2090 +0,0 @@ -{ - "annotationProcessor": { - "org.springframework.boot:spring-boot-configuration-processor": { - "locked": "2.3.12.RELEASE" - } - }, - "compileClasspath": { - "com.fasterxml.jackson.core:jackson-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "io.swagger.core.v3:swagger-core", - "io.swagger.core.v3:swagger-models" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-parameter-names", - "org.webjars:webjars-locator-core" - ] - }, - "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-parameter-names", - "io.swagger.core.v3:swagger-core", - "org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": { - "locked": "2.11.4", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "com.fasterxml.jackson.datatype:jackson-datatype-jdk8": { - "locked": "2.11.4", - "transitive": [ - "org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310": { - "locked": "2.11.4", - "transitive": [ - "io.swagger.core.v3:swagger-core", - "org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.fasterxml.jackson.module:jackson-module-parameter-names": { - "locked": "2.11.4", - "transitive": [ - "org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "io.github.classgraph:classgraph": { - "locked": "4.8.143", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "io.swagger.core.v3:swagger-annotations": { - "locked": "2.2.0", - "transitive": [ - 
"io.swagger.core.v3:swagger-core" - ] - }, - "io.swagger.core.v3:swagger-core": { - "locked": "2.2.0", - "transitive": [ - "org.springdoc:springdoc-openapi-common" - ] - }, - "io.swagger.core.v3:swagger-models": { - "locked": "2.2.0", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "jakarta.activation:jakarta.activation-api": { - "locked": "1.2.2", - "transitive": [ - "jakarta.xml.bind:jakarta.xml.bind-api" - ] - }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "jakarta.validation:jakarta.validation-api": { - "locked": "2.0.2", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "jakarta.xml.bind:jakarta.xml.bind-api": { - "locked": "2.3.3", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - "org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1" - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1" - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1" - }, - "org.apache.tomcat.embed:tomcat-embed-core": { - "locked": "9.0.46", - "transitive": [ - "org.apache.tomcat.embed:tomcat-embed-websocket", - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "org.apache.tomcat.embed:tomcat-embed-websocket": { - "locked": "9.0.46", - "transitive": [ - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, - "org.glassfish:jakarta.el": { - "locked": "3.0.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "org.postgresql:postgresql": { - "locked": "42.2.20" - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "com.zaxxer:HikariCP", - "io.swagger.core.v3:swagger-core", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j", - "org.webjars:webjars-locator-core" - ] - }, - "org.springdoc:springdoc-openapi-common": { - "locked": "1.6.8", - "transitive": [ - "org.springdoc:springdoc-openapi-webmvc-core" - ] - }, - "org.springdoc:springdoc-openapi-ui": { - "locked": "1.6.8" - }, - "org.springdoc:springdoc-openapi-webmvc-core": { - "locked": "1.6.8", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-common", - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc", - 
"org.springframework.boot:spring-boot-starter-json", - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-json": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter-tomcat": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.springframework.boot:spring-boot-starter-web": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", - "org.springframework:spring-web", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", - "org.springframework:spring-web", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, - "org.springframework:spring-web": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-common", - "org.springframework.boot:spring-boot-starter-json", - "org.springframework.boot:spring-boot-starter-web", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-webmvc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-webmvc-core", - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.webjars:swagger-ui": { - "locked": "4.10.3", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.webjars:webjars-locator-core": { - "locked": "0.45", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "org.springframework.boot:spring-boot-starter" - ] - } - }, - "runtimeClasspath": { - "com.fasterxml.jackson.core:jackson-annotations": { - 
"locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.netflix.conductor:conductor-core", - "io.swagger.core.v3:swagger-core", - "io.swagger.core.v3:swagger-models" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.webjars:webjars-locator-core" - ] - }, - "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "io.swagger.core.v3:swagger-core" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": { - "locked": "2.11.4", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310": { - "locked": "2.11.4", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "com.google.protobuf:protobuf-java": { - "locked": "3.13.0", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "com.jayway.jsonpath:json-path": { - "locked": "2.4.0", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.netflix.conductor:conductor-annotations": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-common" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.spectator:spectator-api": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.spotify:completable-futures": { - "locked": "0.3.3", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "commons-io:commons-io": { - "locked": "2.7", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "io.github.classgraph:classgraph": { - "locked": "4.8.143", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "io.reactivex:rxjava": { - "locked": "1.3.8", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "io.swagger.core.v3:swagger-annotations": { - "locked": "2.2.0", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "io.swagger.core.v3:swagger-core": { - "locked": "2.2.0", - "transitive": [ - "org.springdoc:springdoc-openapi-common" - ] - }, - "io.swagger.core.v3:swagger-models": { - "locked": "2.2.0", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "jakarta.activation:jakarta.activation-api": { - "locked": "1.2.2", - "transitive": [ - "com.netflix.conductor:conductor-core", - "jakarta.xml.bind:jakarta.xml.bind-api" - ] - }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "jakarta.validation:jakarta.validation-api": { - "locked": "2.0.2", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - 
"jakarta.xml.bind:jakarta.xml.bind-api": { - "locked": "2.3.3", - "transitive": [ - "com.netflix.conductor:conductor-core", - "io.swagger.core.v3:swagger-core" - ] - }, - "net.minidev:accessors-smart": { - "locked": "2.3.1", - "transitive": [ - "net.minidev:json-smart" - ] - }, - "net.minidev:json-smart": { - "locked": "2.3.1", - "transitive": [ - "com.jayway.jsonpath:json-path" - ] - }, - "org.apache.bval:bval-jsr": { - "locked": "2.0.5", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "io.swagger.core.v3:swagger-core" - ] - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.checkerframework:checker-qual": { - "locked": "3.5.0", - "transitive": [ - "org.postgresql:postgresql" - ] - }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, - "org.ow2.asm:asm": { - "locked": "5.0.4", - "transitive": [ - "net.minidev:accessors-smart" - ] - }, - "org.postgresql:postgresql": { - "locked": "42.2.20" - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "com.jayway.jsonpath:json-path", - "com.netflix.spectator:spectator-api", - "com.zaxxer:HikariCP", - "io.swagger.core.v3:swagger-core", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j", - "org.webjars:webjars-locator-core" - ] - }, - "org.springdoc:springdoc-openapi-common": { - "locked": "1.6.8", - "transitive": [ - "org.springdoc:springdoc-openapi-webmvc-core" - ] - }, - "org.springdoc:springdoc-openapi-ui": { - "locked": "1.6.8" - }, - "org.springdoc:springdoc-openapi-webmvc-core": { - "locked": "1.6.8", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter" - ] - }, - 
"org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-common", - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", - "org.springframework:spring-web", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", - "org.springframework:spring-web", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, - "org.springframework:spring-web": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-common", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-webmvc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-webmvc-core" - ] - }, - "org.webjars:swagger-ui": { - "locked": "4.10.3", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.webjars:webjars-locator-core": { - "locked": "0.45", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "org.springframework.boot:spring-boot-starter" - ] - } - }, - "testCompileClasspath": { - "com.fasterxml.jackson.core:jackson-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.github.docker-java:docker-java-api", - "io.swagger.core.v3:swagger-core", - "io.swagger.core.v3:swagger-models" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": 
"2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-parameter-names", - "org.webjars:webjars-locator-core" - ] - }, - "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-parameter-names", - "io.swagger.core.v3:swagger-core", - "org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": { - "locked": "2.11.4", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "com.fasterxml.jackson.datatype:jackson-datatype-jdk8": { - "locked": "2.11.4", - "transitive": [ - "org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310": { - "locked": "2.11.4", - "transitive": [ - "io.swagger.core.v3:swagger-core", - "org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.fasterxml.jackson.module:jackson-module-parameter-names": { - "locked": "2.11.4", - "transitive": [ - "org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.github.docker-java:docker-java-api": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.github.docker-java:docker-java-transport": { - "locked": "3.2.8", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep" - ] - }, - "com.github.docker-java:docker-java-transport-zerodep": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.jayway.jsonpath:json-path": { - "locked": "2.4.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.vaadin.external.google:android-json": { - "locked": "0.0.20131108.vaadin1", - "transitive": [ - "org.skyscreamer:jsonassert" - ] - }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "io.github.classgraph:classgraph": { - "locked": "4.8.143", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "io.swagger.core.v3:swagger-annotations": { - "locked": "2.2.0", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "io.swagger.core.v3:swagger-core": { - "locked": "2.2.0", - "transitive": [ - "org.springdoc:springdoc-openapi-common" - ] - }, - "io.swagger.core.v3:swagger-models": { - "locked": "2.2.0", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "jakarta.activation:jakarta.activation-api": { - "locked": "1.2.2", - "transitive": [ - "jakarta.xml.bind:jakarta.xml.bind-api" - ] - }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "jakarta.validation:jakarta.validation-api": { - "locked": "2.0.2", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "jakarta.xml.bind:jakarta.xml.bind-api": { - "locked": "2.3.3", - "transitive": [ - 
"io.swagger.core.v3:swagger-core", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "junit:junit": { - "locked": "4.13.2", - "transitive": [ - "org.junit.vintage:junit-vintage-engine", - "org.testcontainers:testcontainers" - ] - }, - "net.bytebuddy:byte-buddy": { - "locked": "1.10.22", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "net.bytebuddy:byte-buddy-agent": { - "locked": "1.10.22", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "net.java.dev.jna:jna": { - "locked": "5.8.0", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep", - "org.rnorth.visible-assertions:visible-assertions" - ] - }, - "net.minidev:accessors-smart": { - "locked": "2.3.1", - "transitive": [ - "net.minidev:json-smart" - ] - }, - "net.minidev:json-smart": { - "locked": "2.3.1", - "transitive": [ - "com.jayway.jsonpath:json-path" - ] - }, - "org.apache.commons:commons-compress": { - "locked": "1.20", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - "org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "org.apache.logging.log4j:log4j-web", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1" - }, - "org.apache.tomcat.embed:tomcat-embed-core": { - "locked": "9.0.46", - "transitive": [ - "org.apache.tomcat.embed:tomcat-embed-websocket", - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "org.apache.tomcat.embed:tomcat-embed-websocket": { - "locked": "9.0.46", - "transitive": [ - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "org.apiguardian:apiguardian-api": { - "locked": "1.1.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.assertj:assertj-core": { - "locked": "3.16.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, - "org.glassfish:jakarta.el": { - "locked": "3.0.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "org.hamcrest:hamcrest": { - "locked": "2.2", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit.jupiter:junit-jupiter": { - "locked": "5.6.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit.jupiter:junit-jupiter-api": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-params" - ] - }, - "org.junit.jupiter:junit-jupiter-params": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter" - ] - }, - 
"org.junit.platform:junit-platform-commons": { - "locked": "1.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.platform:junit-platform-engine" - ] - }, - "org.junit.platform:junit-platform-engine": { - "locked": "1.6.3", - "transitive": [ - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.junit.vintage:junit-vintage-engine": { - "locked": "5.6.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit:junit-bom": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.mockito:mockito-core": { - "locked": "3.3.3", - "transitive": [ - "org.mockito:mockito-junit-jupiter", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.mockito:mockito-junit-jupiter": { - "locked": "3.3.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.objenesis:objenesis": { - "locked": "2.6", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "org.opentest4j:opentest4j": { - "locked": "1.2.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.platform:junit-platform-engine" - ] - }, - "org.ow2.asm:asm": { - "locked": "5.0.4", - "transitive": [ - "net.minidev:accessors-smart" - ] - }, - "org.postgresql:postgresql": { - "locked": "42.2.20" - }, - "org.rnorth.duct-tape:duct-tape": { - "locked": "1.0.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.rnorth.visible-assertions:visible-assertions": { - "locked": "2.1.2", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.skyscreamer:jsonassert": { - "locked": "1.5.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2", - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "com.github.docker-java:docker-java-api", - "com.github.docker-java:docker-java-transport-zerodep", - "com.jayway.jsonpath:json-path", - "com.zaxxer:HikariCP", - "io.swagger.core.v3:swagger-core", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j", - "org.testcontainers:testcontainers", - "org.webjars:webjars-locator-core" - ] - }, - "org.springdoc:springdoc-openapi-common": { - "locked": "1.6.8", - "transitive": [ - "org.springdoc:springdoc-openapi-webmvc-core" - ] - }, - "org.springdoc:springdoc-openapi-ui": { - "locked": "1.6.8" - }, - "org.springdoc:springdoc-openapi-webmvc-core": { - "locked": "1.6.8", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-common", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-starter": 
{ - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc", - "org.springframework.boot:spring-boot-starter-json", - "org.springframework.boot:spring-boot-starter-test", - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-json": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.springframework.boot:spring-boot-starter-log4j2": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter-test": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-tomcat": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.springframework.boot:spring-boot-starter-web": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-test": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-test-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", - "org.springframework:spring-web", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-starter-test", - "org.springframework:spring-aop", - "org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-test", - "org.springframework:spring-tx", - "org.springframework:spring-web", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework:spring-test": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, - 
"org.springframework:spring-web": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-common", - "org.springframework.boot:spring-boot-starter-json", - "org.springframework.boot:spring-boot-starter-web", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-webmvc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-webmvc-core", - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.testcontainers:database-commons": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:jdbc" - ] - }, - "org.testcontainers:jdbc": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:postgresql" - ] - }, - "org.testcontainers:postgresql": { - "locked": "1.15.3" - }, - "org.testcontainers:testcontainers": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:database-commons" - ] - }, - "org.webjars:swagger-ui": { - "locked": "4.10.3", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.webjars:webjars-locator-core": { - "locked": "0.45", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.xmlunit:xmlunit-core": { - "locked": "2.7.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "org.springframework.boot:spring-boot-starter" - ] - } - }, - "testRuntimeClasspath": { - "com.fasterxml.jackson.core:jackson-annotations": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.github.docker-java:docker-java-api", - "com.netflix.conductor:conductor-core", - "io.swagger.core.v3:swagger-core", - "io.swagger.core.v3:swagger-models" - ] - }, - "com.fasterxml.jackson.core:jackson-core": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.core:jackson-databind", - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-parameter-names", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.webjars:webjars-locator-core" - ] - }, - "com.fasterxml.jackson.core:jackson-databind": { - "locked": "2.11.4", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "com.fasterxml.jackson.datatype:jackson-datatype-jdk8", - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310", - "com.fasterxml.jackson.module:jackson-module-parameter-names", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "io.swagger.core.v3:swagger-core", - "org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": { - "locked": "2.11.4", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "com.fasterxml.jackson.datatype:jackson-datatype-jdk8": { - "locked": "2.11.4", - "transitive": [ - "org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.fasterxml.jackson.datatype:jackson-datatype-jsr310": { - "locked": "2.11.4", - "transitive": [ - "io.swagger.core.v3:swagger-core", - "org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.fasterxml.jackson.module:jackson-module-parameter-names": { - "locked": "2.11.4", - "transitive": [ - 
"org.springframework.boot:spring-boot-starter-json" - ] - }, - "com.github.docker-java:docker-java-api": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.github.docker-java:docker-java-transport": { - "locked": "3.2.8", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep" - ] - }, - "com.github.docker-java:docker-java-transport-zerodep": { - "locked": "3.2.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "com.google.protobuf:protobuf-java": { - "locked": "3.13.0", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "com.jayway.jsonpath:json-path": { - "locked": "2.4.0", - "transitive": [ - "com.netflix.conductor:conductor-core", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "com.netflix.conductor:conductor-annotations": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-common" - ] - }, - "com.netflix.conductor:conductor-common": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.netflix.conductor:conductor-core": { - "project": true - }, - "com.netflix.spectator:spectator-api": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.spotify:completable-futures": { - "locked": "0.3.3", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "com.vaadin.external.google:android-json": { - "locked": "0.0.20131108.vaadin1", - "transitive": [ - "org.skyscreamer:jsonassert" - ] - }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "commons-io:commons-io": { - "locked": "2.7", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "io.github.classgraph:classgraph": { - "locked": "4.8.143", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "io.reactivex:rxjava": { - "locked": "1.3.8", - "transitive": [ - "com.netflix.conductor:conductor-core" - ] - }, - "io.swagger.core.v3:swagger-annotations": { - "locked": "2.2.0", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "io.swagger.core.v3:swagger-core": { - "locked": "2.2.0", - "transitive": [ - "org.springdoc:springdoc-openapi-common" - ] - }, - "io.swagger.core.v3:swagger-models": { - "locked": "2.2.0", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "jakarta.activation:jakarta.activation-api": { - "locked": "1.2.2", - "transitive": [ - "com.netflix.conductor:conductor-core", - "jakarta.xml.bind:jakarta.xml.bind-api" - ] - }, - "jakarta.annotation:jakarta.annotation-api": { - "locked": "1.3.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "jakarta.validation:jakarta.validation-api": { - "locked": "2.0.2", - "transitive": [ - "io.swagger.core.v3:swagger-core" - ] - }, - "jakarta.xml.bind:jakarta.xml.bind-api": { - "locked": "2.3.3", - "transitive": [ - "com.netflix.conductor:conductor-core", - "io.swagger.core.v3:swagger-core", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "junit:junit": { - "locked": "4.13.2", - "transitive": [ - "org.junit.vintage:junit-vintage-engine", - "org.testcontainers:testcontainers" - ] - }, - "net.bytebuddy:byte-buddy": { - "locked": "1.10.22", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "net.bytebuddy:byte-buddy-agent": { - "locked": "1.10.22", - "transitive": [ - 
"org.mockito:mockito-core" - ] - }, - "net.java.dev.jna:jna": { - "locked": "5.8.0", - "transitive": [ - "com.github.docker-java:docker-java-transport-zerodep", - "org.rnorth.visible-assertions:visible-assertions" - ] - }, - "net.minidev:accessors-smart": { - "locked": "2.3.1", - "transitive": [ - "net.minidev:json-smart" - ] - }, - "net.minidev:json-smart": { - "locked": "2.3.1", - "transitive": [ - "com.jayway.jsonpath:json-path" - ] - }, - "org.apache.bval:bval-jsr": { - "locked": "2.0.5", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.commons:commons-compress": { - "locked": "1.20", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.apache.commons:commons-lang3": { - "locked": "3.10", - "transitive": [ - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "io.swagger.core.v3:swagger-core" - ] - }, - "org.apache.logging.log4j:log4j-api": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-core", - "org.apache.logging.log4j:log4j-jul", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web" - ] - }, - "org.apache.logging.log4j:log4j-core": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.apache.logging.log4j:log4j-web", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-jul": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-slf4j-impl": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core", - "org.springframework.boot:spring-boot-starter-log4j2" - ] - }, - "org.apache.logging.log4j:log4j-web": { - "locked": "2.17.1", - "transitive": [ - "com.netflix.conductor:conductor-annotations", - "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-core" - ] - }, - "org.apache.tomcat.embed:tomcat-embed-core": { - "locked": "9.0.46", - "transitive": [ - "org.apache.tomcat.embed:tomcat-embed-websocket", - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "org.apache.tomcat.embed:tomcat-embed-websocket": { - "locked": "9.0.46", - "transitive": [ - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "org.apiguardian:apiguardian-api": { - "locked": "1.1.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.assertj:assertj-core": { - "locked": "3.16.1", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.checkerframework:checker-qual": { - "locked": "3.5.0", - "transitive": [ - "org.postgresql:postgresql" - ] - }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4" - }, - 
"org.glassfish:jakarta.el": { - "locked": "3.0.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-tomcat" - ] - }, - "org.hamcrest:hamcrest": { - "locked": "2.2", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit.jupiter:junit-jupiter": { - "locked": "5.6.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit.jupiter:junit-jupiter-api": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.jupiter:junit-jupiter-params", - "org.mockito:mockito-junit-jupiter" - ] - }, - "org.junit.jupiter:junit-jupiter-engine": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter" - ] - }, - "org.junit.jupiter:junit-jupiter-params": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter" - ] - }, - "org.junit.platform:junit-platform-commons": { - "locked": "1.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.platform:junit-platform-engine" - ] - }, - "org.junit.platform:junit-platform-engine": { - "locked": "1.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.junit.vintage:junit-vintage-engine": { - "locked": "5.6.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.junit:junit-bom": { - "locked": "5.6.3", - "transitive": [ - "org.junit.jupiter:junit-jupiter", - "org.junit.jupiter:junit-jupiter-api", - "org.junit.jupiter:junit-jupiter-engine", - "org.junit.jupiter:junit-jupiter-params", - "org.junit.platform:junit-platform-commons", - "org.junit.platform:junit-platform-engine", - "org.junit.vintage:junit-vintage-engine" - ] - }, - "org.mockito:mockito-core": { - "locked": "3.3.3", - "transitive": [ - "org.mockito:mockito-junit-jupiter", - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.mockito:mockito-junit-jupiter": { - "locked": "3.3.3", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.objenesis:objenesis": { - "locked": "2.6", - "transitive": [ - "org.mockito:mockito-core" - ] - }, - "org.opentest4j:opentest4j": { - "locked": "1.2.0", - "transitive": [ - "org.junit.jupiter:junit-jupiter-api", - "org.junit.platform:junit-platform-engine" - ] - }, - "org.ow2.asm:asm": { - "locked": "5.0.4", - "transitive": [ - "net.minidev:accessors-smart" - ] - }, - "org.postgresql:postgresql": { - "locked": "42.2.20" - }, - "org.rnorth.duct-tape:duct-tape": { - "locked": "1.0.8", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.rnorth.visible-assertions:visible-assertions": { - "locked": "2.1.2", - "transitive": [ - "org.testcontainers:testcontainers" - ] - }, - "org.skyscreamer:jsonassert": { - "locked": "1.5.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.slf4j:jul-to-slf4j": { - "locked": "1.7.30", - "transitive": [ - "org.springframework.boot:spring-boot-starter-log4j2", - "org.springframework.boot:spring-boot-starter-logging" - ] - }, - "org.slf4j:slf4j-api": { - "locked": "1.7.30", - "transitive": [ - "com.github.docker-java:docker-java-api", - "com.github.docker-java:docker-java-transport-zerodep", - "com.jayway.jsonpath:json-path", - "com.netflix.spectator:spectator-api", - "com.zaxxer:HikariCP", - "io.swagger.core.v3:swagger-core", - "org.apache.logging.log4j:log4j-slf4j-impl", - "org.slf4j:jul-to-slf4j", - 
"org.testcontainers:testcontainers", - "org.webjars:webjars-locator-core" - ] - }, - "org.springdoc:springdoc-openapi-common": { - "locked": "1.6.8", - "transitive": [ - "org.springdoc:springdoc-openapi-webmvc-core" - ] - }, - "org.springdoc:springdoc-openapi-ui": { - "locked": "1.6.8" - }, - "org.springdoc:springdoc-openapi-webmvc-core": { - "locked": "1.6.8", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.springframework.boot:spring-boot": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-autoconfigure", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-common", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-starter": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc", - "org.springframework.boot:spring-boot-starter-json", - "org.springframework.boot:spring-boot-starter-test", - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-json": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.springframework.boot:spring-boot-starter-log4j2": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-logging": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter" - ] - }, - "org.springframework.boot:spring-boot-starter-test": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-starter-tomcat": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.springframework.boot:spring-boot-starter-web": { - "locked": "2.3.12.RELEASE" - }, - "org.springframework.boot:spring-boot-test": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test", - "org.springframework.boot:spring-boot-test-autoconfigure" - ] - }, - "org.springframework.boot:spring-boot-test-autoconfigure": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework:spring-aop": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-beans": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-aop", - "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", - "org.springframework:spring-web", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-context": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-core": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot", - "org.springframework.boot:spring-boot-starter", - "org.springframework.boot:spring-boot-starter-test", - "org.springframework:spring-aop", - 
"org.springframework:spring-beans", - "org.springframework:spring-context", - "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-test", - "org.springframework:spring-tx", - "org.springframework:spring-web", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-expression": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-context", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-jcl": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-core" - ] - }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework:spring-test": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, - "org.springframework:spring-web": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-common", - "org.springframework.boot:spring-boot-starter-json", - "org.springframework.boot:spring-boot-starter-web", - "org.springframework:spring-webmvc" - ] - }, - "org.springframework:spring-webmvc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springdoc:springdoc-openapi-webmvc-core", - "org.springframework.boot:spring-boot-starter-web" - ] - }, - "org.testcontainers:database-commons": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:jdbc" - ] - }, - "org.testcontainers:jdbc": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:postgresql" - ] - }, - "org.testcontainers:postgresql": { - "locked": "1.15.3" - }, - "org.testcontainers:testcontainers": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:database-commons" - ] - }, - "org.webjars:swagger-ui": { - "locked": "4.10.3", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.webjars:webjars-locator-core": { - "locked": "0.45", - "transitive": [ - "org.springdoc:springdoc-openapi-ui" - ] - }, - "org.xmlunit:xmlunit-core": { - "locked": "2.7.0", - "transitive": [ - "org.springframework.boot:spring-boot-starter-test" - ] - }, - "org.yaml:snakeyaml": { - "locked": "1.26", - "transitive": [ - "com.fasterxml.jackson.dataformat:jackson-dataformat-yaml", - "org.springframework.boot:spring-boot-starter" - ] - } - } -} \ No newline at end of file diff --git a/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadConfiguration.java b/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadConfiguration.java deleted file mode 100644 index 329465f99f..0000000000 --- a/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadConfiguration.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.config; - -import java.util.Map; - -import javax.annotation.PostConstruct; -import javax.sql.DataSource; - -import org.flywaydb.core.Flyway; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.boot.jdbc.DataSourceBuilder; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import com.netflix.conductor.common.utils.ExternalPayloadStorage; -import com.netflix.conductor.core.utils.IDGenerator; -import com.netflix.conductor.postgres.storage.PostgresPayloadStorage; - -@Configuration(proxyBeanMethods = false) -@EnableConfigurationProperties(PostgresPayloadProperties.class) -@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "postgres") -public class PostgresPayloadConfiguration { - - PostgresPayloadProperties properties; - - public PostgresPayloadConfiguration(PostgresPayloadProperties properties) { - this.properties = properties; - } - - @Bean(initMethod = "migrate") - @PostConstruct - public Flyway flywayForExternalDb() { - return Flyway.configure() - .locations("classpath:db/migration_external_postgres") - .schemas("external") - .baselineOnMigrate(true) - .placeholderReplacement(true) - .placeholders( - Map.of( - "tableName", - properties.getTableName(), - "maxDataRows", - String.valueOf(properties.getMaxDataRows()), - "maxDataDays", - "'" + properties.getMaxDataDays() + "'", - "maxDataMonths", - "'" + properties.getMaxDataMonths() + "'", - "maxDataYears", - "'" + properties.getMaxDataYears() + "'")) - .dataSource( - DataSourceBuilder.create() - .driverClassName("org.postgresql.Driver") - .url(properties.getUrl()) - .username(properties.getUsername()) - .password(properties.getPassword()) - .build()) - .load(); - } - - @Bean - public ExternalPayloadStorage postgresExternalPayloadStorage( - IDGenerator idGenerator, PostgresPayloadProperties properties) { - DataSource dataSource = - DataSourceBuilder.create() - .driverClassName("org.postgresql.Driver") - .url(properties.getUrl()) - .username(properties.getUsername()) - .password(properties.getPassword()) - .build(); - return new PostgresPayloadStorage(idGenerator, properties, dataSource); - } -} diff --git a/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadProperties.java b/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadProperties.java deleted file mode 100644 index 28a33c1186..0000000000 --- a/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/config/PostgresPayloadProperties.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
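The configuration class above only takes effect when conductor.external-payload-storage.type is set to postgres, and it binds the conductor.external-payload-storage.postgres.* properties declared in the class that follows. Outside of Spring, the same wiring can be reproduced for a quick local check; a minimal sketch, assuming a local database and illustrative credentials (the JDBC URL, username and password below are placeholders, not values from this patch, and IDGenerator is assumed to expose a no-arg constructor):

import javax.sql.DataSource;

import org.springframework.boot.jdbc.DataSourceBuilder;

import com.netflix.conductor.core.utils.IDGenerator;
import com.netflix.conductor.postgres.config.PostgresPayloadProperties;
import com.netflix.conductor.postgres.storage.PostgresPayloadStorage;

public class PayloadStorageWiringSketch {

    public static PostgresPayloadStorage wire() {
        PostgresPayloadProperties properties = new PostgresPayloadProperties();
        properties.setUrl("jdbc:postgresql://localhost:5432/conductor"); // assumed local database
        properties.setUsername("postgres"); // placeholder credentials
        properties.setPassword("postgres");

        // Mirrors the DataSourceBuilder call used by the bean definitions above.
        DataSource dataSource =
                DataSourceBuilder.create()
                        .driverClassName("org.postgresql.Driver")
                        .url(properties.getUrl())
                        .username(properties.getUsername())
                        .password(properties.getPassword())
                        .build();

        return new PostgresPayloadStorage(new IDGenerator(), properties, dataSource);
    }
}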

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.config; - -import org.springframework.boot.context.properties.ConfigurationProperties; - -@ConfigurationProperties("conductor.external-payload-storage.postgres") -public class PostgresPayloadProperties { - - /** The PostgreSQL schema and table name where the payloads will be stored */ - private String tableName = "external.external_payload"; - - /** Username for connecting to the PostgreSQL database */ - private String username; - - /** Password for connecting to the PostgreSQL database */ - private String password; - - /** URL for connecting to the PostgreSQL database */ - private String url; - - /** - * Maximum count of data rows in the PostgreSQL database. Once this limit is exceeded, the oldest - * data will be deleted. - */ - private long maxDataRows = Long.MAX_VALUE; - - /** - * Maximum age of the data, in days, in the PostgreSQL database. Once this limit is exceeded, the oldest - * data will be deleted. - */ - private int maxDataDays = 0; - - /** - * Maximum age of the data, in months, in the PostgreSQL database. Once this limit is exceeded, the - * oldest data will be deleted. - */ - private int maxDataMonths = 0; - - /** - * Maximum age of the data, in years, in the PostgreSQL database. Once this limit is exceeded, the oldest - * data will be deleted. - */ - private int maxDataYears = 1; - - /** - * Base URL of the conductor server from which the json payloads stored in - * PostgreSQL can be pulled. For example, for local development it is - * "http://localhost:8080". - */ - private String conductorUrl = ""; - - public String getTableName() { - return tableName; - } - - public String getUsername() { - return username; - } - - public String getPassword() { - return password; - } - - public String getUrl() { - return url; - } - - public String getConductorUrl() { - return conductorUrl; - } - - public long getMaxDataRows() { - return maxDataRows; - } - - public int getMaxDataDays() { - return maxDataDays; - } - - public int getMaxDataMonths() { - return maxDataMonths; - } - - public int getMaxDataYears() { - return maxDataYears; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public void setUsername(String username) { - this.username = username; - } - - public void setPassword(String password) { - this.password = password; - } - - public void setUrl(String url) { - this.url = url; - } - - public void setConductorUrl(String conductorUrl) { - this.conductorUrl = conductorUrl; - } - - public void setMaxDataRows(long maxDataRows) { - this.maxDataRows = maxDataRows; - } - - public void setMaxDataDays(int maxDataDays) { - this.maxDataDays = maxDataDays; - } - - public void setMaxDataMonths(int maxDataMonths) { - this.maxDataMonths = maxDataMonths; - } - - public void setMaxDataYears(int maxDataYears) { - this.maxDataYears = maxDataYears; - } -} diff --git a/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResource.java b/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResource.java deleted file mode 100644 index 7ed4917e26..0000000000 --- 
a/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResource.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.controller; - -import java.io.InputStream; - -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.core.io.InputStreamResource; -import org.springframework.http.MediaType; -import org.springframework.http.ResponseEntity; -import org.springframework.web.bind.annotation.GetMapping; -import org.springframework.web.bind.annotation.PathVariable; -import org.springframework.web.bind.annotation.RequestMapping; -import org.springframework.web.bind.annotation.RestController; - -import com.netflix.conductor.common.utils.ExternalPayloadStorage; - -import io.swagger.v3.oas.annotations.Operation; - -/** - * REST controller for pulling a payload's stream of data by key (externalPayloadPath) from the PostgreSQL - * database. - */ -@RestController -@RequestMapping(value = "/api/external/postgres") -@ConditionalOnProperty(name = "conductor.external-payload-storage.type", havingValue = "postgres") -public class ExternalPostgresPayloadResource { - - private final ExternalPayloadStorage postgresService; - - public ExternalPostgresPayloadResource( - @Qualifier("postgresExternalPayloadStorage") ExternalPayloadStorage postgresService) { - this.postgresService = postgresService; - } - - @GetMapping("/{externalPayloadPath}") - @Operation( - summary = - "Get task or workflow by externalPayloadPath from External PostgreSQL Storage") - public ResponseEntity<InputStreamResource> getExternalStorageData( - @PathVariable("externalPayloadPath") String externalPayloadPath) { - InputStream inputStream = postgresService.download(externalPayloadPath); - InputStreamResource outputStreamBody = new InputStreamResource(inputStream); - return ResponseEntity.ok().contentType(MediaType.APPLICATION_JSON).body(outputStreamBody); - } -} diff --git a/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorage.java b/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorage.java deleted file mode 100644 index 6b3c4a3f9b..0000000000 --- a/postgres-external-storage/src/main/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorage.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
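Requests against the controller above take the form GET /api/external/postgres/{externalPayloadPath} and stream the stored JSON back to the caller. A minimal client sketch, assuming a server on localhost:8080 and an illustrative payload key (both are assumptions, not values from this patch):

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PayloadFetchSketch {

    public static void main(String[] args) throws Exception {
        HttpClient client = HttpClient.newHttpClient();
        // "some-id.json" stands in for a real externalPayloadPath returned by getLocation().
        HttpRequest request =
                HttpRequest.newBuilder()
                        .uri(URI.create("http://localhost:8080/api/external/postgres/some-id.json"))
                        .GET()
                        .build();
        HttpResponse<String> response = client.send(request, HttpResponse.BodyHandlers.ofString());
        System.out.println(response.body()); // the stored JSON payload
    }
}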

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.storage; - -import java.io.InputStream; - -import java.sql.Connection; - -import java.sql.PreparedStatement; - -import java.sql.ResultSet; - -import java.sql.SQLException; - -import javax.sql.DataSource; - -import org.apache.commons.lang3.StringUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.common.run.ExternalStorageLocation; -import com.netflix.conductor.common.utils.ExternalPayloadStorage; -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.core.utils.IDGenerator; -import com.netflix.conductor.postgres.config.PostgresPayloadProperties; - -/** - * Stores and retrieves the external payload, which consists of a key and a stream of data, in a PostgreSQL - * database. - */ -public class PostgresPayloadStorage implements ExternalPayloadStorage { - - private static final Logger LOGGER = LoggerFactory.getLogger(PostgresPayloadStorage.class); - - private final DataSource postgresDataSource; - private final String tableName; - private final String conductorUrl; - private final IDGenerator idGenerator; - - public PostgresPayloadStorage( - IDGenerator idGenerator, PostgresPayloadProperties properties, DataSource dataSource) { - this.idGenerator = idGenerator; - tableName = properties.getTableName(); - conductorUrl = properties.getConductorUrl(); - this.postgresDataSource = dataSource; - LOGGER.info("PostgreSQL External Payload Storage initialized."); - } - - /** - * @param operation the type of {@link Operation} to be performed - * @param payloadType the {@link PayloadType} that is being accessed - * @return a {@link ExternalStorageLocation} object which contains the download URI and the - * PostgreSQL object key for the json payload - */ - @Override - public ExternalStorageLocation getLocation( - Operation operation, PayloadType payloadType, String path) { - - ExternalStorageLocation externalStorageLocation = new ExternalStorageLocation(); - String objectKey; - if (StringUtils.isNotBlank(path)) { - objectKey = path; - } else { - objectKey = idGenerator.generate() + ".json"; - } - String uri = conductorUrl + "/api/external/postgres/" + objectKey; - externalStorageLocation.setUri(uri); - externalStorageLocation.setPath(objectKey); - LOGGER.debug("External storage location URI: {}, location path: {}", uri, objectKey); - return externalStorageLocation; - } - - /** - * Uploads the payload to the given PostgreSQL object key. It is expected that the caller - * retrieves the object key using {@link #getLocation(Operation, PayloadType, String)} before - * making this call. 
- - /** - * Downloads the payload stored in PostgreSQL. - * - * @param key the PostgreSQL key of the object - * @return an input stream containing the contents of the object. Caller is expected to close - * the input stream. - */ - @Override - public InputStream download(String key) { - InputStream inputStream; - try (Connection conn = postgresDataSource.getConnection(); - PreparedStatement stmt = - conn.prepareStatement("SELECT data FROM " + tableName + " WHERE id = ?")) { - stmt.setString(1, key); - ResultSet rs = stmt.executeQuery(); - rs.next(); - inputStream = rs.getBinaryStream(1); - rs.close(); - LOGGER.debug("External PostgreSQL downloaded key: {}", key); - } catch (SQLException e) { - String msg = "Error downloading data from external PostgreSQL"; - LOGGER.error(msg, e); - throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, msg, e); - } - return inputStream; - } -} diff --git a/postgres-external-storage/src/main/resources/db/migration_external_postgres/R__initial_schema.sql b/postgres-external-storage/src/main/resources/db/migration_external_postgres/R__initial_schema.sql deleted file mode 100644 index 0d0d20dfa4..0000000000 --- a/postgres-external-storage/src/main/resources/db/migration_external_postgres/R__initial_schema.sql +++ /dev/null @@ -1,56 +0,0 @@ --- --- Copyright 2022 Netflix, Inc. --- --- Licensed under the Apache License, Version 2.0 (the "License"); --- you may not use this file except in compliance with the License. --- You may obtain a copy of the License at --- --- http://www.apache.org/licenses/LICENSE-2.0 --- --- Unless required by applicable law or agreed to in writing, software --- distributed under the License is distributed on an "AS IS" BASIS, --- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. --- See the License for the specific language governing permissions and --- limitations under the License. --- - - -- -------------------------------------------------------------------------------------------------------------- -- SCHEMA FOR EXTERNAL PAYLOAD POSTGRES STORAGE -- -------------------------------------------------------------------------------------------------------------- - -CREATE TABLE IF NOT EXISTS ${tableName} -( - id TEXT, - data bytea NOT NULL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - PRIMARY KEY (id) -); - -ALTER TABLE ${tableName} ALTER COLUMN data SET STORAGE EXTERNAL; - --- Delete trigger that deletes the oldest external_payload rows --- when there are too many rows or they are too old.
- -DROP TRIGGER IF EXISTS tr_keep_row_number_steady ON ${tableName}; - -CREATE OR REPLACE FUNCTION keep_row_number_steady() - RETURNS TRIGGER AS -$body$ -DECLARE - time_interval interval := concat(${maxDataYears},' years ',${maxDataMonths},' mons ',${maxDataDays},' days' ); -BEGIN - WHILE ((SELECT count(id) FROM ${tableName}) > ${maxDataRows}) OR - ((SELECT min(created_on) FROM ${tableName}) < (CURRENT_TIMESTAMP - time_interval)) - LOOP - DELETE FROM ${tableName} - WHERE created_on = (SELECT min(created_on) FROM ${tableName}); - END LOOP; - RETURN NULL; -END; -$body$ - LANGUAGE plpgsql; - -CREATE TRIGGER tr_keep_row_number_steady - AFTER INSERT ON ${tableName} - FOR EACH ROW EXECUTE PROCEDURE keep_row_number_steady(); \ No newline at end of file diff --git a/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResourceTest.java b/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResourceTest.java deleted file mode 100644 index 2b5bd77c97..0000000000 --- a/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/controller/ExternalPostgresPayloadResourceTest.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
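The ${tableName}, ${maxDataRows}, ${maxDataDays}, ${maxDataMonths}, and ${maxDataYears} tokens in the script above are Flyway placeholders, not PostgreSQL syntax; they are substituted when the migration runs, and the day/month/year values are quoted because they are spliced into the concat(...) that builds the retention interval. A minimal sketch of supplying them programmatically, with illustrative values (the production wiring presumably draws these from PostgresPayloadProperties):

```java
import java.util.Map;

import javax.sql.DataSource;

import org.flywaydb.core.Flyway;

public class ExternalPayloadMigrationSketch {

    static void migrate(DataSource dataSource) {
        Flyway.configure()
                .dataSource(dataSource)
                .schemas("external")
                .locations("db/migration_external_postgres")
                // Example retention settings: at most one million rows, none older than a year.
                .placeholders(
                        Map.of(
                                "tableName", "external.external_payload",
                                "maxDataRows", "1000000",
                                "maxDataDays", "'0'",
                                "maxDataMonths", "'0'",
                                "maxDataYears", "'1'"))
                .load()
                .migrate();
    }
}
```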

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.controller; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.charset.StandardCharsets; - -import org.junit.Before; -import org.junit.Test; -import org.springframework.core.io.InputStreamResource; -import org.springframework.http.ResponseEntity; - -import com.netflix.conductor.postgres.storage.PostgresPayloadStorage; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - -public class ExternalPostgresPayloadResourceTest { - - private PostgresPayloadStorage mockPayloadStorage; - private ExternalPostgresPayloadResource postgresResource; - - @Before - public void before() { - this.mockPayloadStorage = mock(PostgresPayloadStorage.class); - this.postgresResource = new ExternalPostgresPayloadResource(this.mockPayloadStorage); - } - - @Test - public void testGetExternalStorageData() throws IOException { - String data = "Dummy data"; - InputStream inputStreamData = - new ByteArrayInputStream(data.getBytes(StandardCharsets.UTF_8)); - when(mockPayloadStorage.download(anyString())).thenReturn(inputStreamData); - ResponseEntity response = - postgresResource.getExternalStorageData("dummyKey.json"); - assertNotNull(response.getBody()); - assertEquals( - data, - new String( - response.getBody().getInputStream().readAllBytes(), - StandardCharsets.UTF_8)); - } -} diff --git a/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorageTest.java b/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorageTest.java deleted file mode 100644 index 219fd24b90..0000000000 --- a/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadStorageTest.java +++ /dev/null @@ -1,128 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.storage; - -import java.io.ByteArrayInputStream; -import java.io.IOException; -import java.io.InputStream; -import java.nio.charset.StandardCharsets; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringRunner; -import org.testcontainers.containers.PostgreSQLContainer; -import org.testcontainers.utility.DockerImageName; - -import com.netflix.conductor.common.config.TestObjectMapperConfiguration; -import com.netflix.conductor.core.utils.IDGenerator; - -import static org.junit.Assert.assertEquals; - -@ContextConfiguration(classes = {TestObjectMapperConfiguration.class}) -@RunWith(SpringRunner.class) -public class PostgresPayloadStorageTest { - - private PostgresPayloadTestUtil testPostgres; - private PostgresPayloadStorage executionPostgres; - - public PostgreSQLContainer postgreSQLContainer; - - private final String inputString = - "Lorem Ipsum is simply dummy text of the printing and typesetting industry." - + " Lorem Ipsum has been the industry's standard dummy text ever since the 1500s."; - private final InputStream inputData; - private final String key = "dummyKey.json"; - - public PostgresPayloadStorageTest() { - inputData = new ByteArrayInputStream(inputString.getBytes(StandardCharsets.UTF_8)); - } - - @Before - public void setup() { - postgreSQLContainer = - new PostgreSQLContainer<>(DockerImageName.parse("postgres")) - .withDatabaseName("conductor"); - postgreSQLContainer.start(); - testPostgres = new PostgresPayloadTestUtil(postgreSQLContainer); - executionPostgres = - new PostgresPayloadStorage( - new IDGenerator(), - testPostgres.getTestProperties(), - testPostgres.getDataSource()); - } - - @Test - public void testWriteInputStreamToDb() throws IOException, SQLException { - executionPostgres.upload(key, inputData, inputData.available()); - - PreparedStatement stmt = - testPostgres - .getDataSource() - .getConnection() - .prepareStatement( - "SELECT data FROM external.external_payload WHERE id = 'dummyKey.json'"); - ResultSet rs = stmt.executeQuery(); - rs.next(); - assertEquals( - inputString, - new String(rs.getBinaryStream(1).readAllBytes(), StandardCharsets.UTF_8)); - } - - @Test - public void testReadInputStreamFromDb() throws IOException, SQLException { - PreparedStatement stmt = - testPostgres - .getDataSource() - .getConnection() - .prepareStatement("INSERT INTO external.external_payload VALUES (?, ?)"); - stmt.setString(1, key); - stmt.setBinaryStream(2, inputData, inputData.available()); - stmt.executeUpdate(); - - assertEquals( - inputString, - new String(executionPostgres.download(key).readAllBytes(), StandardCharsets.UTF_8)); - } - - @Test - public void testMaxRowInTable() throws IOException, SQLException { - executionPostgres.upload(key, inputData, inputData.available()); - executionPostgres.upload("dummyKey2.json", inputData, inputData.available()); - executionPostgres.upload("dummyKey3.json", inputData, inputData.available()); - 
executionPostgres.upload("dummyKey4.json", inputData, inputData.available()); - executionPostgres.upload("dummyKey5.json", inputData, inputData.available()); - executionPostgres.upload("dummyKey6.json", inputData, inputData.available()); - executionPostgres.upload("dummyKey7.json", inputData, inputData.available()); - - PreparedStatement stmt = - testPostgres - .getDataSource() - .getConnection() - .prepareStatement("SELECT count(id) FROM external.external_payload"); - ResultSet rs = stmt.executeQuery(); - rs.next(); - assertEquals(5, rs.getInt(1)); - stmt.close(); - } - - @After - public void teardown() throws SQLException { - testPostgres.getDataSource().getConnection().close(); - } -} diff --git a/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadTestUtil.java b/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadTestUtil.java deleted file mode 100644 index 1bd0b5e493..0000000000 --- a/postgres-external-storage/src/test/java/com/netflix/conductor/postgres/storage/PostgresPayloadTestUtil.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.storage; - -import java.nio.file.Paths; -import java.util.Map; - -import javax.sql.DataSource; - -import org.flywaydb.core.Flyway; -import org.flywaydb.core.api.configuration.FluentConfiguration; -import org.springframework.boot.jdbc.DataSourceBuilder; -import org.testcontainers.containers.PostgreSQLContainer; - -import com.netflix.conductor.postgres.config.PostgresPayloadProperties; - -public class PostgresPayloadTestUtil { - - private final DataSource dataSource; - private final PostgresPayloadProperties properties = new PostgresPayloadProperties(); - - public PostgresPayloadTestUtil(PostgreSQLContainer postgreSQLContainer) { - - this.dataSource = - DataSourceBuilder.create() - .url(postgreSQLContainer.getJdbcUrl()) - .username(postgreSQLContainer.getUsername()) - .password(postgreSQLContainer.getPassword()) - .build(); - flywayMigrate(dataSource); - } - - private void flywayMigrate(DataSource dataSource) { - FluentConfiguration fluentConfiguration = - Flyway.configure() - .schemas("external") - .locations(Paths.get("db/migration_external_postgres").toString()) - .dataSource(dataSource) - .placeholderReplacement(true) - .placeholders( - Map.of( - "tableName", - "external.external_payload", - "maxDataRows", - "5", - "maxDataDays", - "'1'", - "maxDataMonths", - "'1'", - "maxDataYears", - "'1'")); - - Flyway flyway = fluentConfiguration.load(); - flyway.migrate(); - } - - public DataSource getDataSource() { - return dataSource; - } - - public PostgresPayloadProperties getTestProperties() { - return properties; - } -} diff --git a/postgres-persistence/build.gradle b/postgres-persistence/build.gradle deleted file mode 100644 index 2b17febbb2..0000000000 --- a/postgres-persistence/build.gradle +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Copyright 2021 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -dependencies { - implementation project(':conductor-common') - implementation project(':conductor-core') - compileOnly 'org.springframework.boot:spring-boot-starter' - compileOnly 'org.springframework.retry:spring-retry' - - // SBMTODO: remove guava dep - implementation "com.google.guava:guava:${revGuava}" - - implementation "com.fasterxml.jackson.core:jackson-databind" - implementation "com.fasterxml.jackson.core:jackson-core" - - implementation "org.apache.commons:commons-lang3" - implementation "org.postgresql:postgresql" - implementation "org.springframework.boot:spring-boot-starter-jdbc" - implementation "org.flywaydb:flyway-core" - - testImplementation 'org.springframework.retry:spring-retry' - testImplementation "org.testcontainers:postgresql:${revTestContainer}" - - testImplementation project(':conductor-core').sourceSets.test.output - testImplementation project(':conductor-common').sourceSets.test.output -} - -test { - //the SQL unit tests must run within the same JVM to share the same embedded DB - maxParallelForks = 1 -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresConfiguration.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresConfiguration.java deleted file mode 100644 index bb86096e52..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresConfiguration.java +++ /dev/null @@ -1,133 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.config; - -import java.sql.SQLException; -import java.util.Optional; - -import javax.annotation.PostConstruct; -import javax.sql.DataSource; - -import org.flywaydb.core.Flyway; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; -import org.springframework.context.annotation.DependsOn; -import org.springframework.context.annotation.Import; -import org.springframework.retry.RetryContext; -import org.springframework.retry.backoff.NoBackOffPolicy; -import org.springframework.retry.policy.SimpleRetryPolicy; -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.postgres.dao.PostgresExecutionDAO; -import com.netflix.conductor.postgres.dao.PostgresMetadataDAO; -import com.netflix.conductor.postgres.dao.PostgresQueueDAO; - -import com.fasterxml.jackson.databind.ObjectMapper; - -@Configuration(proxyBeanMethods = false) -@EnableConfigurationProperties(PostgresProperties.class) -@ConditionalOnProperty(name = "conductor.db.type", havingValue = "postgres") -// Import the DataSourceAutoConfiguration when postgres database is selected. -// By default, the datasource configuration is excluded in the main module. 
-@Import(DataSourceAutoConfiguration.class) -public class PostgresConfiguration { - - DataSource dataSource; - - public PostgresConfiguration(DataSource dataSource) { - this.dataSource = dataSource; - } - - @Bean(initMethod = "migrate") - @PostConstruct - public Flyway flywayForPrimaryDb() { - return Flyway.configure() - .locations("classpath:db/migration_postgres") - .schemas("public") - .dataSource(dataSource) - .baselineOnMigrate(true) - .load(); - } - - @Bean - @DependsOn({"flywayForPrimaryDb"}) - public PostgresMetadataDAO postgresMetadataDAO( - @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate, - ObjectMapper objectMapper, - PostgresProperties properties) { - return new PostgresMetadataDAO(retryTemplate, objectMapper, dataSource, properties); - } - - @Bean - @DependsOn({"flywayForPrimaryDb"}) - public PostgresExecutionDAO postgresExecutionDAO( - @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate, - ObjectMapper objectMapper) { - return new PostgresExecutionDAO(retryTemplate, objectMapper, dataSource); - } - - @Bean - @DependsOn({"flywayForPrimaryDb"}) - public PostgresQueueDAO postgresQueueDAO( - @Qualifier("postgresRetryTemplate") RetryTemplate retryTemplate, - ObjectMapper objectMapper) { - return new PostgresQueueDAO(retryTemplate, objectMapper, dataSource); - } - - @Bean - public RetryTemplate postgresRetryTemplate(PostgresProperties properties) { - SimpleRetryPolicy retryPolicy = new CustomRetryPolicy(); - retryPolicy.setMaxAttempts(3); - - RetryTemplate retryTemplate = new RetryTemplate(); - retryTemplate.setRetryPolicy(retryPolicy); - retryTemplate.setBackOffPolicy(new NoBackOffPolicy()); - return retryTemplate; - } - - public static class CustomRetryPolicy extends SimpleRetryPolicy { - - private static final String ER_LOCK_DEADLOCK = "40P01"; - private static final String ER_SERIALIZATION_FAILURE = "40001"; - - @Override - public boolean canRetry(final RetryContext context) { - final Optional lastThrowable = - Optional.ofNullable(context.getLastThrowable()); - return lastThrowable - .map(throwable -> super.canRetry(context) && isDeadLockError(throwable)) - .orElseGet(() -> super.canRetry(context)); - } - - private boolean isDeadLockError(Throwable throwable) { - SQLException sqlException = findCauseSQLException(throwable); - if (sqlException == null) { - return false; - } - return ER_LOCK_DEADLOCK.equals(sqlException.getSQLState()) - || ER_SERIALIZATION_FAILURE.equals(sqlException.getSQLState()); - } - - private SQLException findCauseSQLException(Throwable throwable) { - Throwable causeException = throwable; - while (null != causeException && !(causeException instanceof SQLException)) { - causeException = causeException.getCause(); - } - return (SQLException) causeException; - } - } -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresProperties.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresProperties.java deleted file mode 100644 index 3226b5d17f..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/config/PostgresProperties.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
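Two details of the configuration above are easy to miss: CustomRetryPolicy only retries when the cause chain contains an SQLException whose SQLState is 40P01 (deadlock detected) or 40001 (serialization failure), and postgresRetryTemplate hardcodes three attempts rather than reading deadlockRetryMax from the injected PostgresProperties. A minimal sketch of how the DAOs use the resulting template; the callback body is illustrative:

```java
import java.util.function.Supplier;

import org.springframework.retry.support.RetryTemplate;

public class DeadlockRetrySketch {

    // PostgresBaseDAO wraps each transactional lambda in a call like this:
    // deadlock and serialization failures are retried, everything else fails fast.
    static <R> R runWithRetries(RetryTemplate retryTemplate, Supplier<R> work) {
        return retryTemplate.execute(context -> work.get());
    }
}
```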

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.config; - -import java.time.Duration; -import java.time.temporal.ChronoUnit; - -import org.springframework.boot.context.properties.ConfigurationProperties; -import org.springframework.boot.convert.DurationUnit; - -@ConfigurationProperties("conductor.postgres") -public class PostgresProperties { - - /** The time in seconds after which the in-memory task definitions cache will be refreshed */ - @DurationUnit(ChronoUnit.SECONDS) - private Duration taskDefCacheRefreshInterval = Duration.ofSeconds(60); - - private Integer deadlockRetryMax = 3; - - public Duration getTaskDefCacheRefreshInterval() { - return taskDefCacheRefreshInterval; - } - - public void setTaskDefCacheRefreshInterval(Duration taskDefCacheRefreshInterval) { - this.taskDefCacheRefreshInterval = taskDefCacheRefreshInterval; - } - - public Integer getDeadlockRetryMax() { - return deadlockRetryMax; - } - - public void setDeadlockRetryMax(Integer deadlockRetryMax) { - this.deadlockRetryMax = deadlockRetryMax; - } -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresBaseDAO.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresBaseDAO.java deleted file mode 100644 index 74fb9abc11..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresBaseDAO.java +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
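Because of @DurationUnit(ChronoUnit.SECONDS), a bare number in configuration is interpreted as seconds when bound to taskDefCacheRefreshInterval. A small sketch of the relaxed binding using Spring Boot's standalone Binder; the property values are illustrative:

```java
import java.util.Map;

import org.springframework.boot.context.properties.bind.Binder;
import org.springframework.boot.context.properties.source.MapConfigurationPropertySource;

import com.netflix.conductor.postgres.config.PostgresProperties;

public class PostgresPropertiesBindingSketch {

    public static void main(String[] args) {
        MapConfigurationPropertySource source =
                new MapConfigurationPropertySource(
                        Map.of(
                                "conductor.postgres.task-def-cache-refresh-interval", "120",
                                "conductor.postgres.deadlock-retry-max", "5"));
        PostgresProperties properties =
                new Binder(source).bind("conductor.postgres", PostgresProperties.class).get();
        // "120" is read as 120 seconds, i.e. a Duration of PT2M.
        System.out.println(properties.getTaskDefCacheRefreshInterval());
    }
}
```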

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.dao; - -import java.io.IOException; -import java.sql.Connection; -import java.sql.SQLException; -import java.time.Duration; -import java.time.Instant; -import java.util.Arrays; -import java.util.List; -import java.util.function.Consumer; - -import javax.sql.DataSource; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.postgres.util.*; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.core.type.TypeReference; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableList; - -import static com.netflix.conductor.core.exception.ApplicationException.Code.*; - -public abstract class PostgresBaseDAO { - - private static final List EXCLUDED_STACKTRACE_CLASS = - ImmutableList.of(PostgresBaseDAO.class.getName(), Thread.class.getName()); - - protected final Logger logger = LoggerFactory.getLogger(getClass()); - protected final ObjectMapper objectMapper; - protected final DataSource dataSource; - - private final RetryTemplate retryTemplate; - - protected PostgresBaseDAO( - RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) { - this.retryTemplate = retryTemplate; - this.objectMapper = objectMapper; - this.dataSource = dataSource; - } - - protected final LazyToString getCallingMethod() { - return new LazyToString( - () -> - Arrays.stream(Thread.currentThread().getStackTrace()) - .filter( - ste -> - !EXCLUDED_STACKTRACE_CLASS.contains( - ste.getClassName())) - .findFirst() - .map(StackTraceElement::getMethodName) - .orElseThrow(() -> new NullPointerException("Cannot find Caller"))); - } - - protected String toJson(Object value) { - try { - return objectMapper.writeValueAsString(value); - } catch (JsonProcessingException ex) { - throw new ApplicationException(INTERNAL_ERROR, ex); - } - } - - protected T readValue(String json, Class tClass) { - try { - return objectMapper.readValue(json, tClass); - } catch (IOException ex) { - throw new ApplicationException(INTERNAL_ERROR, ex); - } - } - - protected T readValue(String json, TypeReference typeReference) { - try { - return objectMapper.readValue(json, typeReference); - } catch (IOException ex) { - throw new ApplicationException(INTERNAL_ERROR, ex); - } - } - - /** - * Initialize a new transactional {@link Connection} from {@link #dataSource} and pass it to - * {@literal function}. - * - *

Successful executions of {@literal function} will result in a commit and return of {@link - * TransactionalFunction#apply(Connection)}. - * - *

Any {@link Throwable} thrown from {@code TransactionalFunction#apply(Connection)} will - * result in a rollback of the transaction and will be wrapped in an {@link - * ApplicationException} if it is not already one. - * -

Generally this is used to wrap multiple {@link #execute(Connection, String, - * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that - * produce some expected return value. - * - * @param function The function to apply with a new transactional {@link Connection} - * @param <R> The return type. - * @return The result of {@code TransactionalFunction#apply(Connection)} - * @throws ApplicationException If any errors occur. - */ - private <R> R getWithTransaction(final TransactionalFunction<R> function) { - final Instant start = Instant.now(); - LazyToString callingMethod = getCallingMethod(); - logger.trace("{} : starting transaction", callingMethod); - - try (Connection tx = dataSource.getConnection()) { - boolean previousAutoCommitMode = tx.getAutoCommit(); - tx.setAutoCommit(false); - try { - R result = function.apply(tx); - tx.commit(); - return result; - } catch (Throwable th) { - tx.rollback(); - if (th instanceof ApplicationException) { - throw th; - } - throw new ApplicationException(BACKEND_ERROR, th.getMessage(), th); - } finally { - tx.setAutoCommit(previousAutoCommitMode); - } - } catch (SQLException ex) { - throw new ApplicationException(BACKEND_ERROR, ex.getMessage(), ex); - } finally { - logger.trace( - "{} : took {}ms", - callingMethod, - Duration.between(start, Instant.now()).toMillis()); - } - } - - <R> R getWithRetriedTransactions(final TransactionalFunction<R> function) { - try { - return retryTemplate.execute(context -> getWithTransaction(function)); - } catch (Exception e) { - throw (ApplicationException) e; - } - } - - protected <R> R getWithTransactionWithOutErrorPropagation(TransactionalFunction<R> function) { - Instant start = Instant.now(); - LazyToString callingMethod = getCallingMethod(); - logger.trace("{} : starting transaction", callingMethod); - - try (Connection tx = dataSource.getConnection()) { - boolean previousAutoCommitMode = tx.getAutoCommit(); - tx.setAutoCommit(false); - try { - R result = function.apply(tx); - tx.commit(); - return result; - } catch (Throwable th) { - tx.rollback(); - logger.info(CONFLICT + " " + th.getMessage()); - return null; - } finally { - tx.setAutoCommit(previousAutoCommitMode); - } - } catch (SQLException ex) { - throw new ApplicationException(BACKEND_ERROR, ex.getMessage(), ex); - } finally { - logger.trace( - "{} : took {}ms", - callingMethod, - Duration.between(start, Instant.now()).toMillis()); - } - } - - /** - * Wraps {@link #getWithRetriedTransactions(TransactionalFunction)} with no return value. - * -

Generally this is used to wrap multiple {@link #execute(Connection, String, - * ExecuteFunction)} or {@link #query(Connection, String, QueryFunction)} invocations that - * produce no expected return value. - * - * @param consumer The {@link Consumer} callback to pass a transactional {@link Connection} to. - * @throws ApplicationException If any errors occur. - * @see #getWithRetriedTransactions(TransactionalFunction) - */ - protected void withTransaction(Consumer consumer) { - getWithRetriedTransactions( - connection -> { - consumer.accept(connection); - return null; - }); - } - - /** - * Initiate a new transaction and execute a {@link Query} within that context, then return the - * results of {@literal function}. - * - * @param query The query string to prepare. - * @param function The functional callback to pass a {@link Query} to. - * @param The expected return type of {@literal function}. - * @return The results of applying {@literal function}. - */ - protected R queryWithTransaction(String query, QueryFunction function) { - return getWithRetriedTransactions(tx -> query(tx, query, function)); - } - - /** - * Execute a {@link Query} within the context of a given transaction and return the results of - * {@literal function}. - * - * @param tx The transactional {@link Connection} to use. - * @param query The query string to prepare. - * @param function The functional callback to pass a {@link Query} to. - * @param The expected return type of {@literal function}. - * @return The results of applying {@literal function}. - */ - protected R query(Connection tx, String query, QueryFunction function) { - try (Query q = new Query(objectMapper, tx, query)) { - return function.apply(q); - } catch (SQLException ex) { - throw new ApplicationException(BACKEND_ERROR, ex); - } - } - - /** - * Execute a statement with no expected return value within a given transaction. - * - * @param tx The transactional {@link Connection} to use. - * @param query The query string to prepare. - * @param function The functional callback to pass a {@link Query} to. - */ - protected void execute(Connection tx, String query, ExecuteFunction function) { - try (Query q = new Query(objectMapper, tx, query)) { - function.apply(q); - } catch (SQLException ex) { - throw new ApplicationException(BACKEND_ERROR, ex); - } - } - - /** - * Instantiates a new transactional connection and invokes {@link #execute(Connection, String, - * ExecuteFunction)} - * - * @param query The query string to prepare. - * @param function The functional callback to pass a {@link Query} to. - */ - protected void executeWithTransaction(String query, ExecuteFunction function) { - withTransaction(tx -> execute(tx, query, function)); - } -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAO.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAO.java deleted file mode 100644 index df90aae8bd..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAO.java +++ /dev/null @@ -1,1075 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
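These helpers give every subclass a small transactional vocabulary: queryWithTransaction for a single read, executeWithTransaction for a single write, and withTransaction to compose several statements atomically, all routed through the deadlock-retrying template. A minimal sketch of a subclass using them; the ExampleDAO class and its "example" tables are invented for illustration:

```java
import javax.sql.DataSource;

import org.springframework.retry.support.RetryTemplate;

import com.netflix.conductor.postgres.dao.PostgresBaseDAO;
import com.netflix.conductor.postgres.util.Query;

import com.fasterxml.jackson.databind.ObjectMapper;

public class ExampleDAO extends PostgresBaseDAO {

    public ExampleDAO(
            RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) {
        super(retryTemplate, objectMapper, dataSource);
    }

    // Single-statement read: one transaction, retried on deadlock by the template.
    public long countRows() {
        return queryWithTransaction("SELECT COUNT(*) FROM example", Query::executeCount);
    }

    // Multi-statement write: both updates commit or roll back together.
    public void rename(String id, String newName) {
        withTransaction(
                tx -> {
                    execute(
                            tx,
                            "UPDATE example SET name = ? WHERE id = ?",
                            q -> q.addParameter(newName).addParameter(id).executeUpdate());
                    execute(
                            tx,
                            "UPDATE example_audit SET modified_on = CURRENT_TIMESTAMP WHERE id = ?",
                            q -> q.addParameter(id).executeUpdate());
                });
    }
}
```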

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.dao; - -import java.sql.Connection; -import java.sql.Date; -import java.sql.SQLException; -import java.text.SimpleDateFormat; -import java.util.*; -import java.util.stream.Collectors; - -import javax.sql.DataSource; - -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.common.metadata.events.EventExecution; -import com.netflix.conductor.common.metadata.tasks.PollData; -import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.dao.ConcurrentExecutionLimitDAO; -import com.netflix.conductor.dao.ExecutionDAO; -import com.netflix.conductor.dao.PollDataDAO; -import com.netflix.conductor.dao.RateLimitingDAO; -import com.netflix.conductor.metrics.Monitors; -import com.netflix.conductor.model.TaskModel; -import com.netflix.conductor.model.WorkflowModel; -import com.netflix.conductor.postgres.util.Query; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; - -import static com.netflix.conductor.core.exception.ApplicationException.Code.BACKEND_ERROR; - -public class PostgresExecutionDAO extends PostgresBaseDAO - implements ExecutionDAO, RateLimitingDAO, PollDataDAO, ConcurrentExecutionLimitDAO { - - private static final String ARCHIVED_FIELD = "archived"; - private static final String RAW_JSON_FIELD = "rawJSON"; - - public PostgresExecutionDAO( - RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) { - super(retryTemplate, objectMapper, dataSource); - } - - private static String dateStr(Long timeInMs) { - Date date = new Date(timeInMs); - return dateStr(date); - } - - private static String dateStr(Date date) { - SimpleDateFormat format = new SimpleDateFormat("yyyyMMdd"); - return format.format(date); - } - - @Override - public List getPendingTasksByWorkflow(String taskDefName, String workflowId) { - // @formatter:off - String GET_IN_PROGRESS_TASKS_FOR_WORKFLOW = - "SELECT json_data FROM task_in_progress tip " - + "INNER JOIN task t ON t.task_id = tip.task_id " - + "WHERE task_def_name = ? AND workflow_id = ? 
FOR SHARE"; - // @formatter:on - - return queryWithTransaction( - GET_IN_PROGRESS_TASKS_FOR_WORKFLOW, - q -> - q.addParameter(taskDefName) - .addParameter(workflowId) - .executeAndFetch(TaskModel.class)); - } - - @Override - public List getTasks(String taskDefName, String startKey, int count) { - List tasks = new ArrayList<>(count); - - List pendingTasks = getPendingTasksForTaskType(taskDefName); - boolean startKeyFound = startKey == null; - int found = 0; - for (TaskModel pendingTask : pendingTasks) { - if (!startKeyFound) { - if (pendingTask.getTaskId().equals(startKey)) { - startKeyFound = true; - // noinspection ConstantConditions - if (startKey != null) { - continue; - } - } - } - if (startKeyFound && found < count) { - tasks.add(pendingTask); - found++; - } - } - - return tasks; - } - - private static String taskKey(TaskModel task) { - return task.getReferenceTaskName() + "_" + task.getRetryCount(); - } - - @Override - public List createTasks(List tasks) { - List created = Lists.newArrayListWithCapacity(tasks.size()); - - for (TaskModel task : tasks) { - withTransaction( - connection -> { - validate(task); - - task.setScheduledTime(System.currentTimeMillis()); - - final String taskKey = taskKey(task); - - boolean scheduledTaskAdded = addScheduledTask(connection, task, taskKey); - - if (!scheduledTaskAdded) { - logger.trace( - "Task already scheduled, skipping the run " - + task.getTaskId() - + ", ref=" - + task.getReferenceTaskName() - + ", key=" - + taskKey); - return; - } - - insertOrUpdateTaskData(connection, task); - addWorkflowToTaskMapping(connection, task); - addTaskInProgress(connection, task); - updateTask(connection, task); - - created.add(task); - }); - } - - return created; - } - - @Override - public void updateTask(TaskModel task) { - withTransaction(connection -> updateTask(connection, task)); - } - - /** - * This is a dummy implementation and this feature is not for Postgres backed Conductor - * - * @param task: which needs to be evaluated whether it is rateLimited or not - */ - @Override - public boolean exceedsRateLimitPerFrequency(TaskModel task, TaskDef taskDef) { - return false; - } - - @Override - public boolean exceedsLimit(TaskModel task) { - - Optional taskDefinition = task.getTaskDefinition(); - if (taskDefinition.isEmpty()) { - return false; - } - - TaskDef taskDef = taskDefinition.get(); - - int limit = taskDef.concurrencyLimit(); - if (limit <= 0) { - return false; - } - - long current = getInProgressTaskCount(task.getTaskDefName()); - - if (current >= limit) { - Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit); - return true; - } - - logger.info( - "Task execution count for {}: limit={}, current={}", - task.getTaskDefName(), - limit, - getInProgressTaskCount(task.getTaskDefName())); - - String taskId = task.getTaskId(); - - List tasksInProgressInOrderOfArrival = - findAllTasksInProgressInOrderOfArrival(task, limit); - - boolean rateLimited = !tasksInProgressInOrderOfArrival.contains(taskId); - - if (rateLimited) { - logger.info( - "Task execution count limited. 
{}, limit {}, current {}", - task.getTaskDefName(), - limit, - getInProgressTaskCount(task.getTaskDefName())); - Monitors.recordTaskConcurrentExecutionLimited(task.getTaskDefName(), limit); - } - - return rateLimited; - } - - @Override - public boolean removeTask(String taskId) { - TaskModel task = getTask(taskId); - - if (task == null) { - logger.warn("No such task found by id {}", taskId); - return false; - } - - final String taskKey = taskKey(task); - - withTransaction( - connection -> { - removeScheduledTask(connection, task, taskKey); - removeWorkflowToTaskMapping(connection, task); - removeTaskInProgress(connection, task); - removeTaskData(connection, task); - }); - return true; - } - - @Override - public TaskModel getTask(String taskId) { - String GET_TASK = "SELECT json_data FROM task WHERE task_id = ?"; - return queryWithTransaction( - GET_TASK, q -> q.addParameter(taskId).executeAndFetchFirst(TaskModel.class)); - } - - @Override - public List<TaskModel> getTasks(List<String> taskIds) { - if (taskIds.isEmpty()) { - return Lists.newArrayList(); - } - return getWithRetriedTransactions(c -> getTasks(c, taskIds)); - } - - @Override - public List<TaskModel> getPendingTasksForTaskType(String taskName) { - Preconditions.checkNotNull(taskName, "task name cannot be null"); - // @formatter:off - String GET_IN_PROGRESS_TASKS_FOR_TYPE = - "SELECT json_data FROM task_in_progress tip " - + "INNER JOIN task t ON t.task_id = tip.task_id " - + "WHERE task_def_name = ? FOR UPDATE SKIP LOCKED"; - // @formatter:on - - return queryWithTransaction( - GET_IN_PROGRESS_TASKS_FOR_TYPE, - q -> q.addParameter(taskName).executeAndFetch(TaskModel.class)); - } - - @Override - public List<TaskModel> getTasksForWorkflow(String workflowId) { - String GET_TASKS_FOR_WORKFLOW = - "SELECT task_id FROM workflow_to_task WHERE workflow_id = ? FOR SHARE"; - return getWithRetriedTransactions( - tx -> - query( - tx, - GET_TASKS_FOR_WORKFLOW, - q -> { - List<String> taskIds = - q.addParameter(workflowId) - .executeScalarList(String.class); - return getTasks(tx, taskIds); - })); - } - - @Override - public String createWorkflow(WorkflowModel workflow) { - return insertOrUpdateWorkflow(workflow, false); - } - - @Override - public String updateWorkflow(WorkflowModel workflow) { - return insertOrUpdateWorkflow(workflow, true); - } - - @Override - public boolean removeWorkflow(String workflowId) { - boolean removed = false; - WorkflowModel workflow = getWorkflow(workflowId, true); - if (workflow != null) { - withTransaction( - connection -> { - removeWorkflowDefToWorkflowMapping(connection, workflow); - removeWorkflow(connection, workflowId); - removePendingWorkflow(connection, workflow.getWorkflowName(), workflowId); - }); - removed = true; - - for (TaskModel task : workflow.getTasks()) { - if (!removeTask(task.getTaskId())) { - removed = false; - } - } - } - return removed; - } - - /** - * This is a dummy implementation and this feature is not supported for Postgres-backed - * Conductor - */ - @Override - public boolean removeWorkflowWithExpiry(String workflowId, int ttlSeconds) { - throw new UnsupportedOperationException( - "This method is not implemented in PostgresExecutionDAO.
Please use RedisDAO mode instead for using TTLs."); - } - - @Override - public void removeFromPendingWorkflow(String workflowType, String workflowId) { - withTransaction(connection -> removePendingWorkflow(connection, workflowType, workflowId)); - } - - @Override - public WorkflowModel getWorkflow(String workflowId) { - return getWorkflow(workflowId, true); - } - - @Override - public WorkflowModel getWorkflow(String workflowId, boolean includeTasks) { - WorkflowModel workflow = getWithRetriedTransactions(tx -> readWorkflow(tx, workflowId)); - - if (workflow != null) { - if (includeTasks) { - List tasks = getTasksForWorkflow(workflowId); - tasks.sort(Comparator.comparingInt(TaskModel::getSeq)); - workflow.setTasks(tasks); - } - } - return workflow; - } - - /** - * @param workflowName name of the workflow - * @param version the workflow version - * @return list of workflow ids that are in RUNNING state returns workflows of all versions - * for the given workflow name - */ - @Override - public List getRunningWorkflowIds(String workflowName, int version) { - Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); - String GET_PENDING_WORKFLOW_IDS = - "SELECT workflow_id FROM workflow_pending WHERE workflow_type = ? FOR SHARE SKIP LOCKED"; - - return queryWithTransaction( - GET_PENDING_WORKFLOW_IDS, - q -> q.addParameter(workflowName).executeScalarList(String.class)); - } - - /** - * @param workflowName Name of the workflow - * @param version the workflow version - * @return list of workflows that are in RUNNING state - */ - @Override - public List getPendingWorkflowsByType(String workflowName, int version) { - Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); - return getRunningWorkflowIds(workflowName, version).stream() - .map(this::getWorkflow) - .filter(workflow -> workflow.getWorkflowVersion() == version) - .collect(Collectors.toList()); - } - - @Override - public long getPendingWorkflowCount(String workflowName) { - Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); - String GET_PENDING_WORKFLOW_COUNT = - "SELECT COUNT(*) FROM workflow_pending WHERE workflow_type = ?"; - - return queryWithTransaction( - GET_PENDING_WORKFLOW_COUNT, q -> q.addParameter(workflowName).executeCount()); - } - - @Override - public long getInProgressTaskCount(String taskDefName) { - String GET_IN_PROGRESS_TASK_COUNT = - "SELECT COUNT(*) FROM task_in_progress WHERE task_def_name = ? AND in_progress_status = true"; - - return queryWithTransaction( - GET_IN_PROGRESS_TASK_COUNT, q -> q.addParameter(taskDefName).executeCount()); - } - - @Override - public List getWorkflowsByType( - String workflowName, Long startTime, Long endTime) { - Preconditions.checkNotNull(workflowName, "workflowName cannot be null"); - Preconditions.checkNotNull(startTime, "startTime cannot be null"); - Preconditions.checkNotNull(endTime, "endTime cannot be null"); - - List workflows = new LinkedList<>(); - - withTransaction( - tx -> { - // @formatter:off - String GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF = - "SELECT workflow_id FROM workflow_def_to_workflow " - + "WHERE workflow_def = ? AND date_str BETWEEN ? AND ? 
FOR SHARE SKIP LOCKED"; - // @formatter:on - - List workflowIds = - query( - tx, - GET_ALL_WORKFLOWS_FOR_WORKFLOW_DEF, - q -> - q.addParameter(workflowName) - .addParameter(dateStr(startTime)) - .addParameter(dateStr(endTime)) - .executeScalarList(String.class)); - workflowIds.forEach( - workflowId -> { - try { - WorkflowModel wf = getWorkflow(workflowId); - if (wf.getCreateTime() >= startTime - && wf.getCreateTime() <= endTime) { - workflows.add(wf); - } - } catch (Exception e) { - logger.error( - "Unable to load workflow id {} with name {}", - workflowId, - workflowName, - e); - } - }); - }); - - return workflows; - } - - @Override - public List getWorkflowsByCorrelationId( - String workflowName, String correlationId, boolean includeTasks) { - Preconditions.checkNotNull(correlationId, "correlationId cannot be null"); - String GET_WORKFLOWS_BY_CORRELATION_ID = - "SELECT w.json_data FROM workflow w left join workflow_def_to_workflow wd on w.workflow_id = wd.workflow_id WHERE w.correlation_id = ? and wd.workflow_def = ? FOR SHARE SKIP LOCKED"; - - return queryWithTransaction( - GET_WORKFLOWS_BY_CORRELATION_ID, - q -> - q.addParameter(correlationId) - .addParameter(workflowName) - .executeAndFetch(WorkflowModel.class)); - } - - @Override - public boolean canSearchAcrossWorkflows() { - return true; - } - - @Override - public boolean addEventExecution(EventExecution eventExecution) { - try { - return getWithRetriedTransactions(tx -> insertEventExecution(tx, eventExecution)); - } catch (Exception e) { - throw new ApplicationException( - ApplicationException.Code.BACKEND_ERROR, - "Unable to add event execution " + eventExecution.getId(), - e); - } - } - - @Override - public void removeEventExecution(EventExecution eventExecution) { - try { - withTransaction(tx -> removeEventExecution(tx, eventExecution)); - } catch (Exception e) { - throw new ApplicationException( - ApplicationException.Code.BACKEND_ERROR, - "Unable to remove event execution " + eventExecution.getId(), - e); - } - } - - @Override - public void updateEventExecution(EventExecution eventExecution) { - try { - withTransaction(tx -> updateEventExecution(tx, eventExecution)); - } catch (Exception e) { - throw new ApplicationException( - ApplicationException.Code.BACKEND_ERROR, - "Unable to update event execution " + eventExecution.getId(), - e); - } - } - - public List getEventExecutions( - String eventHandlerName, String eventName, String messageId, int max) { - try { - List executions = Lists.newLinkedList(); - withTransaction( - tx -> { - for (int i = 0; i < max; i++) { - String executionId = - messageId + "_" - + i; // see SimpleEventProcessor.handle to understand - // how the - // execution id is set - EventExecution ee = - readEventExecution( - tx, - eventHandlerName, - eventName, - messageId, - executionId); - if (ee == null) { - break; - } - executions.add(ee); - } - }); - return executions; - } catch (Exception e) { - String message = - String.format( - "Unable to get event executions for eventHandlerName=%s, eventName=%s, messageId=%s", - eventHandlerName, eventName, messageId); - throw new ApplicationException(ApplicationException.Code.BACKEND_ERROR, message, e); - } - } - - @Override - public void updateLastPollData(String taskDefName, String domain, String workerId) { - Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); - PollData pollData = new PollData(taskDefName, domain, workerId, System.currentTimeMillis()); - String effectiveDomain = (domain == null) ? 
"DEFAULT" : domain; - withTransaction(tx -> insertOrUpdatePollData(tx, pollData, effectiveDomain)); - } - - @Override - public PollData getPollData(String taskDefName, String domain) { - Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); - String effectiveDomain = (domain == null) ? "DEFAULT" : domain; - return getWithRetriedTransactions(tx -> readPollData(tx, taskDefName, effectiveDomain)); - } - - @Override - public List getPollData(String taskDefName) { - Preconditions.checkNotNull(taskDefName, "taskDefName name cannot be null"); - return readAllPollData(taskDefName); - } - - @Override - public List getAllPollData() { - try (Connection tx = dataSource.getConnection()) { - boolean previousAutoCommitMode = tx.getAutoCommit(); - tx.setAutoCommit(true); - try { - String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data ORDER BY queue_name"; - return query(tx, GET_ALL_POLL_DATA, q -> q.executeAndFetch(PollData.class)); - } catch (Throwable th) { - throw new ApplicationException(BACKEND_ERROR, th.getMessage(), th); - } finally { - tx.setAutoCommit(previousAutoCommitMode); - } - } catch (SQLException ex) { - throw new ApplicationException(BACKEND_ERROR, ex.getMessage(), ex); - } - } - - private List getTasks(Connection connection, List taskIds) { - if (taskIds.isEmpty()) { - return Lists.newArrayList(); - } - - // Generate a formatted query string with a variable number of bind params based - // on taskIds.size() - final String GET_TASKS_FOR_IDS = - String.format( - "SELECT json_data FROM task WHERE task_id IN (%s) AND json_data IS NOT NULL", - Query.generateInBindings(taskIds.size())); - - return query( - connection, - GET_TASKS_FOR_IDS, - q -> q.addParameters(taskIds).executeAndFetch(TaskModel.class)); - } - - private String insertOrUpdateWorkflow(WorkflowModel workflow, boolean update) { - Preconditions.checkNotNull(workflow, "workflow object cannot be null"); - - boolean terminal = workflow.getStatus().isTerminal(); - - List tasks = workflow.getTasks(); - workflow.setTasks(Lists.newLinkedList()); - - withTransaction( - tx -> { - if (!update) { - addWorkflow(tx, workflow); - addWorkflowDefToWorkflowMapping(tx, workflow); - } else { - updateWorkflow(tx, workflow); - } - - if (terminal) { - removePendingWorkflow( - tx, workflow.getWorkflowName(), workflow.getWorkflowId()); - } else { - addPendingWorkflow( - tx, workflow.getWorkflowName(), workflow.getWorkflowId()); - } - }); - - workflow.setTasks(tasks); - return workflow.getWorkflowId(); - } - - private void updateTask(Connection connection, TaskModel task) { - Optional taskDefinition = task.getTaskDefinition(); - - if (taskDefinition.isPresent() && taskDefinition.get().concurrencyLimit() > 0) { - boolean inProgress = - task.getStatus() != null - && task.getStatus().equals(TaskModel.Status.IN_PROGRESS); - updateInProgressStatus(connection, task, inProgress); - } - - insertOrUpdateTaskData(connection, task); - - if (task.getStatus() != null && task.getStatus().isTerminal()) { - removeTaskInProgress(connection, task); - } - - addWorkflowToTaskMapping(connection, task); - } - - private WorkflowModel readWorkflow(Connection connection, String workflowId) { - String GET_WORKFLOW = "SELECT json_data FROM workflow WHERE workflow_id = ?"; - - return query( - connection, - GET_WORKFLOW, - q -> q.addParameter(workflowId).executeAndFetchFirst(WorkflowModel.class)); - } - - private void addWorkflow(Connection connection, WorkflowModel workflow) { - String INSERT_WORKFLOW = - "INSERT INTO workflow (workflow_id, correlation_id, 
json_data) VALUES (?, ?, ?)"; - - execute( - connection, - INSERT_WORKFLOW, - q -> - q.addParameter(workflow.getWorkflowId()) - .addParameter(workflow.getCorrelationId()) - .addJsonParameter(workflow) - .executeUpdate()); - } - - private void updateWorkflow(Connection connection, WorkflowModel workflow) { - String UPDATE_WORKFLOW = - "UPDATE workflow SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE workflow_id = ?"; - - execute( - connection, - UPDATE_WORKFLOW, - q -> - q.addJsonParameter(workflow) - .addParameter(workflow.getWorkflowId()) - .executeUpdate()); - } - - private void removeWorkflow(Connection connection, String workflowId) { - String REMOVE_WORKFLOW = "DELETE FROM workflow WHERE workflow_id = ?"; - execute(connection, REMOVE_WORKFLOW, q -> q.addParameter(workflowId).executeDelete()); - } - - private void addPendingWorkflow(Connection connection, String workflowType, String workflowId) { - - String EXISTS_PENDING_WORKFLOW = - "SELECT EXISTS(SELECT 1 FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?)"; - - boolean exists = - query( - connection, - EXISTS_PENDING_WORKFLOW, - q -> q.addParameter(workflowType).addParameter(workflowId).exists()); - - if (!exists) { - String INSERT_PENDING_WORKFLOW = - "INSERT INTO workflow_pending (workflow_type, workflow_id) VALUES (?, ?) ON CONFLICT (workflow_type,workflow_id) DO NOTHING"; - - execute( - connection, - INSERT_PENDING_WORKFLOW, - q -> q.addParameter(workflowType).addParameter(workflowId).executeUpdate()); - } - } - - private void removePendingWorkflow( - Connection connection, String workflowType, String workflowId) { - String REMOVE_PENDING_WORKFLOW = - "DELETE FROM workflow_pending WHERE workflow_type = ? AND workflow_id = ?"; - - execute( - connection, - REMOVE_PENDING_WORKFLOW, - q -> q.addParameter(workflowType).addParameter(workflowId).executeDelete()); - } - - private void insertOrUpdateTaskData(Connection connection, TaskModel task) { - /* - * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON CONFLICT do update' sql statement. The problem with that - * is that if we try the INSERT first, the sequence will be increased even if the ON CONFLICT happens. - */ - String UPDATE_TASK = - "UPDATE task SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE task_id=?"; - int rowsUpdated = - query( - connection, - UPDATE_TASK, - q -> - q.addJsonParameter(task) - .addParameter(task.getTaskId()) - .executeUpdate()); - - if (rowsUpdated == 0) { - String INSERT_TASK = - "INSERT INTO task (task_id, json_data, modified_on) VALUES (?, ?, CURRENT_TIMESTAMP) ON CONFLICT (task_id) DO UPDATE SET json_data=excluded.json_data, modified_on=excluded.modified_on"; - execute( - connection, - INSERT_TASK, - q -> q.addParameter(task.getTaskId()).addJsonParameter(task).executeUpdate()); - } - } - - private void removeTaskData(Connection connection, TaskModel task) { - String REMOVE_TASK = "DELETE FROM task WHERE task_id = ?"; - execute(connection, REMOVE_TASK, q -> q.addParameter(task.getTaskId()).executeDelete()); - } - - private void addWorkflowToTaskMapping(Connection connection, TaskModel task) { - - String EXISTS_WORKFLOW_TO_TASK = - "SELECT EXISTS(SELECT 1 FROM workflow_to_task WHERE workflow_id = ? 
AND task_id = ?)"; - - boolean exists = - query( - connection, - EXISTS_WORKFLOW_TO_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(task.getTaskId()) - .exists()); - - if (!exists) { - String INSERT_WORKFLOW_TO_TASK = - "INSERT INTO workflow_to_task (workflow_id, task_id) VALUES (?, ?) ON CONFLICT (workflow_id,task_id) DO NOTHING"; - - execute( - connection, - INSERT_WORKFLOW_TO_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(task.getTaskId()) - .executeUpdate()); - } - } - - private void removeWorkflowToTaskMapping(Connection connection, TaskModel task) { - String REMOVE_WORKFLOW_TO_TASK = - "DELETE FROM workflow_to_task WHERE workflow_id = ? AND task_id = ?"; - - execute( - connection, - REMOVE_WORKFLOW_TO_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(task.getTaskId()) - .executeDelete()); - } - - private void addWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) { - String INSERT_WORKFLOW_DEF_TO_WORKFLOW = - "INSERT INTO workflow_def_to_workflow (workflow_def, date_str, workflow_id) VALUES (?, ?, ?)"; - - execute( - connection, - INSERT_WORKFLOW_DEF_TO_WORKFLOW, - q -> - q.addParameter(workflow.getWorkflowName()) - .addParameter(dateStr(workflow.getCreateTime())) - .addParameter(workflow.getWorkflowId()) - .executeUpdate()); - } - - private void removeWorkflowDefToWorkflowMapping(Connection connection, WorkflowModel workflow) { - String REMOVE_WORKFLOW_DEF_TO_WORKFLOW = - "DELETE FROM workflow_def_to_workflow WHERE workflow_def = ? AND date_str = ? AND workflow_id = ?"; - - execute( - connection, - REMOVE_WORKFLOW_DEF_TO_WORKFLOW, - q -> - q.addParameter(workflow.getWorkflowName()) - .addParameter(dateStr(workflow.getCreateTime())) - .addParameter(workflow.getWorkflowId()) - .executeUpdate()); - } - - @VisibleForTesting - boolean addScheduledTask(Connection connection, TaskModel task, String taskKey) { - - final String EXISTS_SCHEDULED_TASK = - "SELECT EXISTS(SELECT 1 FROM task_scheduled where workflow_id = ? AND task_key = ?)"; - - boolean exists = - query( - connection, - EXISTS_SCHEDULED_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(taskKey) - .exists()); - - if (!exists) { - final String INSERT_IGNORE_SCHEDULED_TASK = - "INSERT INTO task_scheduled (workflow_id, task_key, task_id) VALUES (?, ?, ?) ON CONFLICT (workflow_id,task_key) DO NOTHING"; - - int count = - query( - connection, - INSERT_IGNORE_SCHEDULED_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(taskKey) - .addParameter(task.getTaskId()) - .executeUpdate()); - return count > 0; - } else { - return false; - } - } - - private void removeScheduledTask(Connection connection, TaskModel task, String taskKey) { - String REMOVE_SCHEDULED_TASK = - "DELETE FROM task_scheduled WHERE workflow_id = ? AND task_key = ?"; - execute( - connection, - REMOVE_SCHEDULED_TASK, - q -> - q.addParameter(task.getWorkflowInstanceId()) - .addParameter(taskKey) - .executeDelete()); - } - - private void addTaskInProgress(Connection connection, TaskModel task) { - String EXISTS_IN_PROGRESS_TASK = - "SELECT EXISTS(SELECT 1 FROM task_in_progress WHERE task_def_name = ? 
AND task_id = ?)"; - - boolean exists = - query( - connection, - EXISTS_IN_PROGRESS_TASK, - q -> - q.addParameter(task.getTaskDefName()) - .addParameter(task.getTaskId()) - .exists()); - - if (!exists) { - String INSERT_IN_PROGRESS_TASK = - "INSERT INTO task_in_progress (task_def_name, task_id, workflow_id) VALUES (?, ?, ?)"; - - execute( - connection, - INSERT_IN_PROGRESS_TASK, - q -> - q.addParameter(task.getTaskDefName()) - .addParameter(task.getTaskId()) - .addParameter(task.getWorkflowInstanceId()) - .executeUpdate()); - } - } - - private void removeTaskInProgress(Connection connection, TaskModel task) { - String REMOVE_IN_PROGRESS_TASK = - "DELETE FROM task_in_progress WHERE task_def_name = ? AND task_id = ?"; - - execute( - connection, - REMOVE_IN_PROGRESS_TASK, - q -> - q.addParameter(task.getTaskDefName()) - .addParameter(task.getTaskId()) - .executeUpdate()); - } - - private void updateInProgressStatus(Connection connection, TaskModel task, boolean inProgress) { - String UPDATE_IN_PROGRESS_TASK_STATUS = - "UPDATE task_in_progress SET in_progress_status = ?, modified_on = CURRENT_TIMESTAMP " - + "WHERE task_def_name = ? AND task_id = ?"; - - execute( - connection, - UPDATE_IN_PROGRESS_TASK_STATUS, - q -> - q.addParameter(inProgress) - .addParameter(task.getTaskDefName()) - .addParameter(task.getTaskId()) - .executeUpdate()); - } - - private boolean insertEventExecution(Connection connection, EventExecution eventExecution) { - - String INSERT_EVENT_EXECUTION = - "INSERT INTO event_execution (event_handler_name, event_name, message_id, execution_id, json_data) " - + "VALUES (?, ?, ?, ?, ?)"; - int count = - query( - connection, - INSERT_EVENT_EXECUTION, - q -> - q.addParameter(eventExecution.getName()) - .addParameter(eventExecution.getEvent()) - .addParameter(eventExecution.getMessageId()) - .addParameter(eventExecution.getId()) - .addJsonParameter(eventExecution) - .executeUpdate()); - return count > 0; - } - - private void updateEventExecution(Connection connection, EventExecution eventExecution) { - // @formatter:off - String UPDATE_EVENT_EXECUTION = - "UPDATE event_execution SET " - + "json_data = ?, " - + "modified_on = CURRENT_TIMESTAMP " - + "WHERE event_handler_name = ? " - + "AND event_name = ? " - + "AND message_id = ? " - + "AND execution_id = ?"; - // @formatter:on - - execute( - connection, - UPDATE_EVENT_EXECUTION, - q -> - q.addJsonParameter(eventExecution) - .addParameter(eventExecution.getName()) - .addParameter(eventExecution.getEvent()) - .addParameter(eventExecution.getMessageId()) - .addParameter(eventExecution.getId()) - .executeUpdate()); - } - - private void removeEventExecution(Connection connection, EventExecution eventExecution) { - String REMOVE_EVENT_EXECUTION = - "DELETE FROM event_execution " - + "WHERE event_handler_name = ? " - + "AND event_name = ? " - + "AND message_id = ? " - + "AND execution_id = ?"; - - execute( - connection, - REMOVE_EVENT_EXECUTION, - q -> - q.addParameter(eventExecution.getName()) - .addParameter(eventExecution.getEvent()) - .addParameter(eventExecution.getMessageId()) - .addParameter(eventExecution.getId()) - .executeUpdate()); - } - - private EventExecution readEventExecution( - Connection connection, - String eventHandlerName, - String eventName, - String messageId, - String executionId) { - // @formatter:off - String GET_EVENT_EXECUTION = - "SELECT json_data FROM event_execution " - + "WHERE event_handler_name = ? " - + "AND event_name = ? " - + "AND message_id = ? 
" - + "AND execution_id = ?"; - // @formatter:on - return query( - connection, - GET_EVENT_EXECUTION, - q -> - q.addParameter(eventHandlerName) - .addParameter(eventName) - .addParameter(messageId) - .addParameter(executionId) - .executeAndFetchFirst(EventExecution.class)); - } - - private void insertOrUpdatePollData(Connection connection, PollData pollData, String domain) { - /* - * Most times the row will be updated so let's try the update first. This used to be an 'INSERT/ON CONFLICT do update' sql statement. The problem with that - * is that if we try the INSERT first, the sequence will be increased even if the ON CONFLICT happens. Since polling happens *a lot*, the sequence can increase - * dramatically even though it won't be used. - */ - String UPDATE_POLL_DATA = - "UPDATE poll_data SET json_data=?, modified_on=CURRENT_TIMESTAMP WHERE queue_name=? AND domain=?"; - int rowsUpdated = - query( - connection, - UPDATE_POLL_DATA, - q -> - q.addJsonParameter(pollData) - .addParameter(pollData.getQueueName()) - .addParameter(domain) - .executeUpdate()); - - if (rowsUpdated == 0) { - String INSERT_POLL_DATA = - "INSERT INTO poll_data (queue_name, domain, json_data, modified_on) VALUES (?, ?, ?, CURRENT_TIMESTAMP) ON CONFLICT (queue_name,domain) DO UPDATE SET json_data=excluded.json_data, modified_on=excluded.modified_on"; - execute( - connection, - INSERT_POLL_DATA, - q -> - q.addParameter(pollData.getQueueName()) - .addParameter(domain) - .addJsonParameter(pollData) - .executeUpdate()); - } - } - - private PollData readPollData(Connection connection, String queueName, String domain) { - String GET_POLL_DATA = - "SELECT json_data FROM poll_data WHERE queue_name = ? AND domain = ?"; - return query( - connection, - GET_POLL_DATA, - q -> - q.addParameter(queueName) - .addParameter(domain) - .executeAndFetchFirst(PollData.class)); - } - - private List readAllPollData(String queueName) { - String GET_ALL_POLL_DATA = "SELECT json_data FROM poll_data WHERE queue_name = ?"; - return queryWithTransaction( - GET_ALL_POLL_DATA, q -> q.addParameter(queueName).executeAndFetch(PollData.class)); - } - - private List findAllTasksInProgressInOrderOfArrival(TaskModel task, int limit) { - String GET_IN_PROGRESS_TASKS_WITH_LIMIT = - "SELECT task_id FROM task_in_progress WHERE task_def_name = ? ORDER BY created_on LIMIT ?"; - - return queryWithTransaction( - GET_IN_PROGRESS_TASKS_WITH_LIMIT, - q -> - q.addParameter(task.getTaskDefName()) - .addParameter(limit) - .executeScalarList(String.class)); - } - - private void validate(TaskModel task) { - Preconditions.checkNotNull(task, "task object cannot be null"); - Preconditions.checkNotNull(task.getTaskId(), "Task id cannot be null"); - Preconditions.checkNotNull( - task.getWorkflowInstanceId(), "Workflow instance id cannot be null"); - Preconditions.checkNotNull( - task.getReferenceTaskName(), "Task reference name cannot be null"); - } -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAO.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAO.java deleted file mode 100644 index ffe752e4b2..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAO.java +++ /dev/null @@ -1,551 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.dao; - -import java.sql.Connection; -import java.util.*; -import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; - -import javax.sql.DataSource; - -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.common.metadata.events.EventHandler; -import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.common.metadata.workflow.WorkflowDef; -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.dao.EventHandlerDAO; -import com.netflix.conductor.dao.MetadataDAO; -import com.netflix.conductor.metrics.Monitors; -import com.netflix.conductor.postgres.config.PostgresProperties; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.base.Preconditions; - -public class PostgresMetadataDAO extends PostgresBaseDAO implements MetadataDAO, EventHandlerDAO { - - private final ConcurrentHashMap taskDefCache = new ConcurrentHashMap<>(); - private static final String CLASS_NAME = PostgresMetadataDAO.class.getSimpleName(); - - public PostgresMetadataDAO( - RetryTemplate retryTemplate, - ObjectMapper objectMapper, - DataSource dataSource, - PostgresProperties properties) { - super(retryTemplate, objectMapper, dataSource); - - long cacheRefreshTime = properties.getTaskDefCacheRefreshInterval().getSeconds(); - Executors.newSingleThreadScheduledExecutor() - .scheduleWithFixedDelay( - this::refreshTaskDefs, - cacheRefreshTime, - cacheRefreshTime, - TimeUnit.SECONDS); - } - - @Override - public void createTaskDef(TaskDef taskDef) { - validate(taskDef); - insertOrUpdateTaskDef(taskDef); - } - - @Override - public String updateTaskDef(TaskDef taskDef) { - validate(taskDef); - return insertOrUpdateTaskDef(taskDef); - } - - @Override - public TaskDef getTaskDef(String name) { - Preconditions.checkNotNull(name, "TaskDef name cannot be null"); - TaskDef taskDef = taskDefCache.get(name); - if (taskDef == null) { - if (logger.isTraceEnabled()) { - logger.trace("Cache miss: {}", name); - } - taskDef = getTaskDefFromDB(name); - } - - return taskDef; - } - - @Override - public List getAllTaskDefs() { - return getWithRetriedTransactions(this::findAllTaskDefs); - } - - @Override - public void removeTaskDef(String name) { - final String DELETE_TASKDEF_QUERY = "DELETE FROM meta_task_def WHERE name = ?"; - - executeWithTransaction( - DELETE_TASKDEF_QUERY, - q -> { - if (!q.addParameter(name).executeDelete()) { - throw new ApplicationException( - ApplicationException.Code.NOT_FOUND, "No such task definition"); - } - - taskDefCache.remove(name); - }); - } - - @Override - public void createWorkflowDef(WorkflowDef def) { - validate(def); - - withTransaction( - tx -> { - if (workflowExists(tx, def)) { - throw new ApplicationException( - ApplicationException.Code.CONFLICT, - "Workflow with " + def.key() + " already exists!"); - } - - insertOrUpdateWorkflowDef(tx, def); - }); - } - - @Override - public void updateWorkflowDef(WorkflowDef def) { - validate(def); - withTransaction(tx -> insertOrUpdateWorkflowDef(tx, def)); - } - - @Override - public Optional getLatestWorkflowDef(String 
name) { - final String GET_LATEST_WORKFLOW_DEF_QUERY = - "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND " - + "version = latest_version"; - - return Optional.ofNullable( - queryWithTransaction( - GET_LATEST_WORKFLOW_DEF_QUERY, - q -> q.addParameter(name).executeAndFetchFirst(WorkflowDef.class))); - } - - @Override - public Optional getWorkflowDef(String name, int version) { - final String GET_WORKFLOW_DEF_QUERY = - "SELECT json_data FROM meta_workflow_def WHERE NAME = ? AND version = ?"; - return Optional.ofNullable( - queryWithTransaction( - GET_WORKFLOW_DEF_QUERY, - q -> - q.addParameter(name) - .addParameter(version) - .executeAndFetchFirst(WorkflowDef.class))); - } - - @Override - public void removeWorkflowDef(String name, Integer version) { - final String DELETE_WORKFLOW_QUERY = - "DELETE from meta_workflow_def WHERE name = ? AND version = ?"; - - withTransaction( - tx -> { - // remove specified workflow - execute( - tx, - DELETE_WORKFLOW_QUERY, - q -> { - if (!q.addParameter(name).addParameter(version).executeDelete()) { - throw new ApplicationException( - ApplicationException.Code.NOT_FOUND, - String.format( - "No such workflow definition: %s version: %d", - name, version)); - } - }); - // reset latest version based on remaining rows for this workflow - Optional maxVersion = getLatestVersion(tx, name); - maxVersion.ifPresent(newVersion -> updateLatestVersion(tx, name, newVersion)); - }); - } - - public List findAll() { - final String FIND_ALL_WORKFLOW_DEF_QUERY = "SELECT DISTINCT name FROM meta_workflow_def"; - return queryWithTransaction( - FIND_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(String.class)); - } - - @Override - public List getAllWorkflowDefs() { - final String GET_ALL_WORKFLOW_DEF_QUERY = - "SELECT json_data FROM meta_workflow_def ORDER BY name, version"; - - return queryWithTransaction( - GET_ALL_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class)); - } - - public List getAllLatest() { - final String GET_ALL_LATEST_WORKFLOW_DEF_QUERY = - "SELECT json_data FROM meta_workflow_def WHERE version = " + "latest_version"; - - return queryWithTransaction( - GET_ALL_LATEST_WORKFLOW_DEF_QUERY, q -> q.executeAndFetch(WorkflowDef.class)); - } - - public List getAllVersions(String name) { - final String GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY = - "SELECT json_data FROM meta_workflow_def WHERE name = ? 
" + "ORDER BY version"; - - return queryWithTransaction( - GET_ALL_VERSIONS_WORKFLOW_DEF_QUERY, - q -> q.addParameter(name).executeAndFetch(WorkflowDef.class)); - } - - @Override - public void addEventHandler(EventHandler eventHandler) { - Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null"); - - final String INSERT_EVENT_HANDLER_QUERY = - "INSERT INTO meta_event_handler (name, event, active, json_data) " - + "VALUES (?, ?, ?, ?)"; - - withTransaction( - tx -> { - if (getEventHandler(tx, eventHandler.getName()) != null) { - throw new ApplicationException( - ApplicationException.Code.CONFLICT, - "EventHandler with name " - + eventHandler.getName() - + " already exists!"); - } - - execute( - tx, - INSERT_EVENT_HANDLER_QUERY, - q -> - q.addParameter(eventHandler.getName()) - .addParameter(eventHandler.getEvent()) - .addParameter(eventHandler.isActive()) - .addJsonParameter(eventHandler) - .executeUpdate()); - }); - } - - @Override - public void updateEventHandler(EventHandler eventHandler) { - Preconditions.checkNotNull(eventHandler.getName(), "EventHandler name cannot be null"); - - // @formatter:off - final String UPDATE_EVENT_HANDLER_QUERY = - "UPDATE meta_event_handler SET " - + "event = ?, active = ?, json_data = ?, " - + "modified_on = CURRENT_TIMESTAMP WHERE name = ?"; - // @formatter:on - - withTransaction( - tx -> { - EventHandler existing = getEventHandler(tx, eventHandler.getName()); - if (existing == null) { - throw new ApplicationException( - ApplicationException.Code.NOT_FOUND, - "EventHandler with name " + eventHandler.getName() + " not found!"); - } - - execute( - tx, - UPDATE_EVENT_HANDLER_QUERY, - q -> - q.addParameter(eventHandler.getEvent()) - .addParameter(eventHandler.isActive()) - .addJsonParameter(eventHandler) - .addParameter(eventHandler.getName()) - .executeUpdate()); - }); - } - - @Override - public void removeEventHandler(String name) { - final String DELETE_EVENT_HANDLER_QUERY = "DELETE FROM meta_event_handler WHERE name = ?"; - - withTransaction( - tx -> { - EventHandler existing = getEventHandler(tx, name); - if (existing == null) { - throw new ApplicationException( - ApplicationException.Code.NOT_FOUND, - "EventHandler with name " + name + " not found!"); - } - - execute( - tx, - DELETE_EVENT_HANDLER_QUERY, - q -> q.addParameter(name).executeDelete()); - }); - } - - @Override - public List getAllEventHandlers() { - final String READ_ALL_EVENT_HANDLER_QUERY = "SELECT json_data FROM meta_event_handler"; - return queryWithTransaction( - READ_ALL_EVENT_HANDLER_QUERY, q -> q.executeAndFetch(EventHandler.class)); - } - - @Override - public List getEventHandlersForEvent(String event, boolean activeOnly) { - final String READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY = - "SELECT json_data FROM meta_event_handler WHERE event = ?"; - return queryWithTransaction( - READ_ALL_EVENT_HANDLER_BY_EVENT_QUERY, - q -> { - q.addParameter(event); - return q.executeAndFetch( - rs -> { - List handlers = new ArrayList<>(); - while (rs.next()) { - EventHandler h = readValue(rs.getString(1), EventHandler.class); - if (!activeOnly || h.isActive()) { - handlers.add(h); - } - } - - return handlers; - }); - }); - } - - /** - * Use {@link Preconditions} to check for required {@link TaskDef} fields, throwing a Runtime - * exception if validations fail. - * - * @param taskDef The {@code TaskDef} to check. 
- */ - private void validate(TaskDef taskDef) { - Preconditions.checkNotNull(taskDef, "TaskDef object cannot be null"); - Preconditions.checkNotNull(taskDef.getName(), "TaskDef name cannot be null"); - } - - /** - * Use {@link Preconditions} to check for required {@link WorkflowDef} fields, throwing a - * Runtime exception if validations fail. - * - * @param def The {@code WorkflowDef} to check. - */ - private void validate(WorkflowDef def) { - Preconditions.checkNotNull(def, "WorkflowDef object cannot be null"); - Preconditions.checkNotNull(def.getName(), "WorkflowDef name cannot be null"); - } - - /** - * Retrieve a {@link EventHandler} by {@literal name}. - * - * @param connection The {@link Connection} to use for queries. - * @param name The {@code EventHandler} name to look for. - * @return {@literal null} if nothing is found, otherwise the {@code EventHandler}. - */ - private EventHandler getEventHandler(Connection connection, String name) { - final String READ_ONE_EVENT_HANDLER_QUERY = - "SELECT json_data FROM meta_event_handler WHERE name = ?"; - - return query( - connection, - READ_ONE_EVENT_HANDLER_QUERY, - q -> q.addParameter(name).executeAndFetchFirst(EventHandler.class)); - } - - /** - * Check if a {@link WorkflowDef} with the same {@literal name} and {@literal version} already - * exist. - * - * @param connection The {@link Connection} to use for queries. - * @param def The {@code WorkflowDef} to check for. - * @return {@literal true} if a {@code WorkflowDef} already exists with the same values. - */ - private Boolean workflowExists(Connection connection, WorkflowDef def) { - final String CHECK_WORKFLOW_DEF_EXISTS_QUERY = - "SELECT COUNT(*) FROM meta_workflow_def WHERE name = ? AND " + "version = ?"; - - return query( - connection, - CHECK_WORKFLOW_DEF_EXISTS_QUERY, - q -> q.addParameter(def.getName()).addParameter(def.getVersion()).exists()); - } - - /** - * Return the latest version that exists for the provided {@code name}. - * - * @param tx The {@link Connection} to use for queries. - * @param name The {@code name} to check for. - * @return {@code Optional.empty()} if no versions exist, otherwise the max {@link - * WorkflowDef#getVersion} found. - */ - private Optional getLatestVersion(Connection tx, String name) { - final String GET_LATEST_WORKFLOW_DEF_VERSION = - "SELECT max(version) AS version FROM meta_workflow_def WHERE " + "name = ?"; - - Integer val = - query( - tx, - GET_LATEST_WORKFLOW_DEF_VERSION, - q -> { - q.addParameter(name); - return q.executeAndFetch( - rs -> { - if (!rs.next()) { - return null; - } - - return rs.getInt(1); - }); - }); - - return Optional.ofNullable(val); - } - - /** - * Update the latest version for the workflow with name {@code WorkflowDef} to the version - * provided in {@literal version}. - * - * @param tx The {@link Connection} to use for queries. - * @param name Workflow def name to update - * @param version The new latest {@code version} value. - */ - private void updateLatestVersion(Connection tx, String name, int version) { - final String UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY = - "UPDATE meta_workflow_def SET latest_version = ? 
" + "WHERE name = ?"; - - execute( - tx, - UPDATE_WORKFLOW_DEF_LATEST_VERSION_QUERY, - q -> q.addParameter(version).addParameter(name).executeUpdate()); - } - - private void insertOrUpdateWorkflowDef(Connection tx, WorkflowDef def) { - final String INSERT_WORKFLOW_DEF_QUERY = - "INSERT INTO meta_workflow_def (name, version, json_data) VALUES (?," + " ?, ?)"; - - Optional version = getLatestVersion(tx, def.getName()); - if (!workflowExists(tx, def)) { - execute( - tx, - INSERT_WORKFLOW_DEF_QUERY, - q -> - q.addParameter(def.getName()) - .addParameter(def.getVersion()) - .addJsonParameter(def) - .executeUpdate()); - } else { - // @formatter:off - final String UPDATE_WORKFLOW_DEF_QUERY = - "UPDATE meta_workflow_def " - + "SET json_data = ?, modified_on = CURRENT_TIMESTAMP " - + "WHERE name = ? AND version = ?"; - // @formatter:on - - execute( - tx, - UPDATE_WORKFLOW_DEF_QUERY, - q -> - q.addJsonParameter(def) - .addParameter(def.getName()) - .addParameter(def.getVersion()) - .executeUpdate()); - } - int maxVersion = def.getVersion(); - if (version.isPresent() && version.get() > def.getVersion()) { - maxVersion = version.get(); - } - - updateLatestVersion(tx, def.getName(), maxVersion); - } - - /** - * Query persistence for all defined {@link TaskDef} data, and cache it in {@link - * #taskDefCache}. - */ - private void refreshTaskDefs() { - try { - withTransaction( - tx -> { - Map map = new HashMap<>(); - findAllTaskDefs(tx).forEach(taskDef -> map.put(taskDef.getName(), taskDef)); - - synchronized (taskDefCache) { - taskDefCache.clear(); - taskDefCache.putAll(map); - } - - if (logger.isTraceEnabled()) { - logger.trace("Refreshed {} TaskDefs", taskDefCache.size()); - } - }); - } catch (Exception e) { - Monitors.error(CLASS_NAME, "refreshTaskDefs"); - logger.error("refresh TaskDefs failed ", e); - } - } - - /** - * Query persistence for all defined {@link TaskDef} data. - * - * @param tx The {@link Connection} to use for queries. - * @return A new {@code List} with all the {@code TaskDef} data that was retrieved. - */ - private List findAllTaskDefs(Connection tx) { - final String READ_ALL_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def"; - - return query(tx, READ_ALL_TASKDEF_QUERY, q -> q.executeAndFetch(TaskDef.class)); - } - - /** - * Explicitly retrieves a {@link TaskDef} from persistence, avoiding {@link #taskDefCache}. - * - * @param name The name of the {@code TaskDef} to query for. - * @return {@literal null} if nothing is found, otherwise the {@code TaskDef}. 
- */ - private TaskDef getTaskDefFromDB(String name) { - final String READ_ONE_TASKDEF_QUERY = "SELECT json_data FROM meta_task_def WHERE name = ?"; - - return queryWithTransaction( - READ_ONE_TASKDEF_QUERY, - q -> q.addParameter(name).executeAndFetchFirst(TaskDef.class)); - } - - private String insertOrUpdateTaskDef(TaskDef taskDef) { - final String UPDATE_TASKDEF_QUERY = - "UPDATE meta_task_def SET json_data = ?, modified_on = CURRENT_TIMESTAMP WHERE name = ?"; - - final String INSERT_TASKDEF_QUERY = - "INSERT INTO meta_task_def (name, json_data) VALUES (?, ?)"; - - return getWithRetriedTransactions( - tx -> { - execute( - tx, - UPDATE_TASKDEF_QUERY, - update -> { - int result = - update.addJsonParameter(taskDef) - .addParameter(taskDef.getName()) - .executeUpdate(); - if (result == 0) { - execute( - tx, - INSERT_TASKDEF_QUERY, - insert -> - insert.addParameter(taskDef.getName()) - .addJsonParameter(taskDef) - .executeUpdate()); - } - }); - - taskDefCache.put(taskDef.getName(), taskDef); - return taskDef.getName(); - }); - } -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresQueueDAO.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresQueueDAO.java deleted file mode 100644 index ed95b3433d..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/dao/PostgresQueueDAO.java +++ /dev/null @@ -1,476 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
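The metadata DAO deleted above keeps task definitions in a ConcurrentHashMap that a single-threaded scheduler rebuilds on a fixed delay, with a database fallback on cache misses. A generic sketch of that cache-aside scheme follows; the loader callbacks here are illustrative stand-ins for findAllTaskDefs and getTaskDefFromDB, not the DAO's actual signatures:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
import java.util.function.Supplier;

public class DefinitionCacheSketch<T> {

    private final ConcurrentHashMap<String, T> cache = new ConcurrentHashMap<>();

    public DefinitionCacheSketch(Supplier<Map<String, T>> loadAll, long refreshSeconds) {
        Executors.newSingleThreadScheduledExecutor()
                .scheduleWithFixedDelay(
                        () -> {
                            // Build the fresh snapshot before swapping, so readers
                            // never observe a half-cleared cache.
                            Map<String, T> fresh = loadAll.get();
                            synchronized (cache) {
                                cache.clear();
                                cache.putAll(fresh);
                            }
                        },
                        refreshSeconds,
                        refreshSeconds,
                        TimeUnit.SECONDS);
    }

    // Cache-aside read: serve from the map, fall back to the database between refreshes.
    public T get(String name, Function<String, T> loadOne) {
        T cached = cache.get(name);
        return cached != null ? cached : loadOne.apply(name);
    }
}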
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.dao; - -import java.sql.Connection; -import java.util.*; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.stream.Collectors; - -import javax.sql.DataSource; - -import org.springframework.retry.support.RetryTemplate; - -import com.netflix.conductor.core.events.queue.Message; -import com.netflix.conductor.dao.QueueDAO; -import com.netflix.conductor.postgres.util.Query; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.Maps; -import com.google.common.util.concurrent.Uninterruptibles; - -public class PostgresQueueDAO extends PostgresBaseDAO implements QueueDAO { - - private static final Long UNACK_SCHEDULE_MS = 60_000L; - - public PostgresQueueDAO( - RetryTemplate retryTemplate, ObjectMapper objectMapper, DataSource dataSource) { - super(retryTemplate, objectMapper, dataSource); - - Executors.newSingleThreadScheduledExecutor() - .scheduleAtFixedRate( - this::processAllUnacks, - UNACK_SCHEDULE_MS, - UNACK_SCHEDULE_MS, - TimeUnit.MILLISECONDS); - logger.debug(PostgresQueueDAO.class.getName() + " is ready to serve"); - } - - @Override - public void push(String queueName, String messageId, long offsetTimeInSecond) { - push(queueName, messageId, 0, offsetTimeInSecond); - } - - @Override - public void push(String queueName, String messageId, int priority, long offsetTimeInSecond) { - withTransaction( - tx -> pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond)); - } - - @Override - public void push(String queueName, List messages) { - withTransaction( - tx -> - messages.forEach( - message -> - pushMessage( - tx, - queueName, - message.getId(), - message.getPayload(), - message.getPriority(), - 0))); - } - - @Override - public boolean pushIfNotExists(String queueName, String messageId, long offsetTimeInSecond) { - return pushIfNotExists(queueName, messageId, 0, offsetTimeInSecond); - } - - @Override - public boolean pushIfNotExists( - String queueName, String messageId, int priority, long offsetTimeInSecond) { - return getWithRetriedTransactions( - tx -> { - if (!existsMessage(tx, queueName, messageId)) { - pushMessage(tx, queueName, messageId, null, priority, offsetTimeInSecond); - return true; - } - return false; - }); - } - - @Override - public List pop(String queueName, int count, int timeout) { - return pollMessages(queueName, count, timeout).stream() - .map(Message::getId) - .collect(Collectors.toList()); - } - - @Override - public List pollMessages(String queueName, int count, int timeout) { - if (timeout < 1) { - List messages = - getWithTransactionWithOutErrorPropagation( - tx -> popMessages(tx, queueName, count, timeout)); - if (messages == null) { - return new ArrayList<>(); - } - return messages; - } - - long start = System.currentTimeMillis(); - final List messages = new ArrayList<>(); - - while (true) { - List messagesSlice = - getWithTransactionWithOutErrorPropagation( - tx -> popMessages(tx, queueName, count - messages.size(), timeout)); - if (messagesSlice == null) { - logger.warn( - "Unable to poll {} messages from {} due to tx conflict, only {} popped", - count, - 
queueName, - messages.size()); - // conflict could have happened, returned messages popped so far - return messages; - } - - messages.addAll(messagesSlice); - if (messages.size() >= count || ((System.currentTimeMillis() - start) > timeout)) { - return messages; - } - Uninterruptibles.sleepUninterruptibly(100, TimeUnit.MILLISECONDS); - } - } - - @Override - public void remove(String queueName, String messageId) { - withTransaction(tx -> removeMessage(tx, queueName, messageId)); - } - - @Override - public int getSize(String queueName) { - final String GET_QUEUE_SIZE = "SELECT COUNT(*) FROM queue_message WHERE queue_name = ?"; - return queryWithTransaction( - GET_QUEUE_SIZE, q -> ((Long) q.addParameter(queueName).executeCount()).intValue()); - } - - @Override - public boolean ack(String queueName, String messageId) { - return getWithRetriedTransactions(tx -> removeMessage(tx, queueName, messageId)); - } - - @Override - public boolean setUnackTimeout(String queueName, String messageId, long unackTimeout) { - long updatedOffsetTimeInSecond = unackTimeout / 1000; - - final String UPDATE_UNACK_TIMEOUT = - "UPDATE queue_message SET offset_time_seconds = ?, deliver_on = (current_timestamp + (? ||' seconds')::interval) WHERE queue_name = ? AND message_id = ?"; - - return queryWithTransaction( - UPDATE_UNACK_TIMEOUT, - q -> - q.addParameter(updatedOffsetTimeInSecond) - .addParameter(updatedOffsetTimeInSecond) - .addParameter(queueName) - .addParameter(messageId) - .executeUpdate()) - == 1; - } - - @Override - public void flush(String queueName) { - final String FLUSH_QUEUE = "DELETE FROM queue_message WHERE queue_name = ?"; - executeWithTransaction(FLUSH_QUEUE, q -> q.addParameter(queueName).executeDelete()); - } - - @Override - public Map queuesDetail() { - final String GET_QUEUES_DETAIL = - "SELECT queue_name, (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size FROM queue q FOR SHARE SKIP LOCKED"; - return queryWithTransaction( - GET_QUEUES_DETAIL, - q -> - q.executeAndFetch( - rs -> { - Map detail = Maps.newHashMap(); - while (rs.next()) { - String queueName = rs.getString("queue_name"); - Long size = rs.getLong("size"); - detail.put(queueName, size); - } - return detail; - })); - } - - @Override - public Map>> queuesDetailVerbose() { - // @formatter:off - final String GET_QUEUES_DETAIL_VERBOSE = - "SELECT queue_name, \n" - + " (SELECT count(*) FROM queue_message WHERE popped = false AND queue_name = q.queue_name) AS size,\n" - + " (SELECT count(*) FROM queue_message WHERE popped = true AND queue_name = q.queue_name) AS uacked \n" - + "FROM queue q FOR SHARE SKIP LOCKED"; - // @formatter:on - - return queryWithTransaction( - GET_QUEUES_DETAIL_VERBOSE, - q -> - q.executeAndFetch( - rs -> { - Map>> result = - Maps.newHashMap(); - while (rs.next()) { - String queueName = rs.getString("queue_name"); - Long size = rs.getLong("size"); - Long queueUnacked = rs.getLong("uacked"); - result.put( - queueName, - ImmutableMap.of( - "a", - ImmutableMap - .of( // sharding not implemented, - // returning only - // one shard with all the - // info - "size", - size, - "uacked", - queueUnacked))); - } - return result; - })); - } - - /** - * Un-pop all un-acknowledged messages for all queues. 
- * - * @since 1.11.6 - */ - public void processAllUnacks() { - logger.trace("processAllUnacks started"); - - getWithRetriedTransactions( - tx -> { - String LOCK_TASKS = - "SELECT queue_name, message_id FROM queue_message WHERE popped = true AND (deliver_on + (60 ||' seconds')::interval) < current_timestamp limit 1000 FOR UPDATE SKIP LOCKED"; - - List messages = - query( - tx, - LOCK_TASKS, - p -> - p.executeAndFetch( - rs -> { - List results = - new ArrayList(); - while (rs.next()) { - QueueMessage qm = new QueueMessage(); - qm.queueName = - rs.getString("queue_name"); - qm.messageId = - rs.getString("message_id"); - results.add(qm); - } - return results; - })); - - if (messages.size() == 0) { - return 0; - } - - Map> queueMessageMap = new HashMap>(); - for (QueueMessage qm : messages) { - if (!queueMessageMap.containsKey(qm.queueName)) { - queueMessageMap.put(qm.queueName, new ArrayList()); - } - queueMessageMap.get(qm.queueName).add(qm.messageId); - } - - int totalUnacked = 0; - for (String queueName : queueMessageMap.keySet()) { - Integer unacked = 0; - ; - try { - final List msgIds = queueMessageMap.get(queueName); - final String UPDATE_POPPED = - String.format( - "UPDATE queue_message SET popped = false WHERE queue_name = ? and message_id IN (%s)", - Query.generateInBindings(msgIds.size())); - - unacked = - query( - tx, - UPDATE_POPPED, - q -> - q.addParameter(queueName) - .addParameters(msgIds) - .executeUpdate()); - } catch (Exception e) { - e.printStackTrace(); - } - totalUnacked += unacked; - logger.debug("Unacked {} messages from all queues", unacked); - } - - if (totalUnacked > 0) { - logger.debug("Unacked {} messages from all queues", totalUnacked); - } - return totalUnacked; - }); - } - - @Override - public void processUnacks(String queueName) { - final String PROCESS_UNACKS = - "UPDATE queue_message SET popped = false WHERE queue_name = ? AND popped = true AND (current_timestamp - (60 ||' seconds')::interval) > deliver_on"; - executeWithTransaction(PROCESS_UNACKS, q -> q.addParameter(queueName).executeUpdate()); - } - - @Override - public boolean resetOffsetTime(String queueName, String messageId) { - long offsetTimeInSecond = 0; // Reset to 0 - final String SET_OFFSET_TIME = - "UPDATE queue_message SET offset_time_seconds = ?, deliver_on = (current_timestamp + (? ||' seconds')::interval) \n" - + "WHERE queue_name = ? AND message_id = ?"; - - return queryWithTransaction( - SET_OFFSET_TIME, - q -> - q.addParameter(offsetTimeInSecond) - .addParameter(offsetTimeInSecond) - .addParameter(queueName) - .addParameter(messageId) - .executeUpdate() - == 1); - } - - private boolean existsMessage(Connection connection, String queueName, String messageId) { - final String EXISTS_MESSAGE = - "SELECT EXISTS(SELECT 1 FROM queue_message WHERE queue_name = ? AND message_id = ?) FOR SHARE"; - return query( - connection, - EXISTS_MESSAGE, - q -> q.addParameter(queueName).addParameter(messageId).exists()); - } - - private void pushMessage( - Connection connection, - String queueName, - String messageId, - String payload, - Integer priority, - long offsetTimeInSecond) { - - createQueueIfNotExists(connection, queueName); - - String UPDATE_MESSAGE = - "UPDATE queue_message SET payload=?, deliver_on=(current_timestamp + (? ||' seconds')::interval) WHERE queue_name = ? 
AND message_id = ?"; - int rowsUpdated = - query( - connection, - UPDATE_MESSAGE, - q -> - q.addParameter(payload) - .addParameter(offsetTimeInSecond) - .addParameter(queueName) - .addParameter(messageId) - .executeUpdate()); - - if (rowsUpdated == 0) { - String PUSH_MESSAGE = - "INSERT INTO queue_message (deliver_on, queue_name, message_id, priority, offset_time_seconds, payload) VALUES ((current_timestamp + (? ||' seconds')::interval), ?,?,?,?,?) ON CONFLICT (queue_name,message_id) DO UPDATE SET payload=excluded.payload, deliver_on=excluded.deliver_on"; - execute( - connection, - PUSH_MESSAGE, - q -> - q.addParameter(offsetTimeInSecond) - .addParameter(queueName) - .addParameter(messageId) - .addParameter(priority) - .addParameter(offsetTimeInSecond) - .addParameter(payload) - .executeUpdate()); - } - } - - private boolean removeMessage(Connection connection, String queueName, String messageId) { - final String REMOVE_MESSAGE = - "DELETE FROM queue_message WHERE queue_name = ? AND message_id = ?"; - return query( - connection, - REMOVE_MESSAGE, - q -> q.addParameter(queueName).addParameter(messageId).executeDelete()); - } - - private List peekMessages(Connection connection, String queueName, int count) { - if (count < 1) { - return Collections.emptyList(); - } - - final String PEEK_MESSAGES = - "SELECT message_id, priority, payload FROM queue_message WHERE queue_name = ? AND popped = false AND deliver_on <= (current_timestamp + (1000 ||' microseconds')::interval) ORDER BY priority DESC, deliver_on, created_on LIMIT ? FOR UPDATE SKIP LOCKED"; - - return query( - connection, - PEEK_MESSAGES, - p -> - p.addParameter(queueName) - .addParameter(count) - .executeAndFetch( - rs -> { - List results = new ArrayList<>(); - while (rs.next()) { - Message m = new Message(); - m.setId(rs.getString("message_id")); - m.setPriority(rs.getInt("priority")); - m.setPayload(rs.getString("payload")); - results.add(m); - } - return results; - })); - } - - private List popMessages( - Connection connection, String queueName, int count, int timeout) { - List messages = peekMessages(connection, queueName, count); - - if (messages.isEmpty()) { - return messages; - } - - List poppedMessages = new ArrayList<>(); - for (Message message : messages) { - final String POP_MESSAGE = - "UPDATE queue_message SET popped = true WHERE queue_name = ? AND message_id = ? AND popped = false"; - int result = - query( - connection, - POP_MESSAGE, - q -> - q.addParameter(queueName) - .addParameter(message.getId()) - .executeUpdate()); - - if (result == 1) { - poppedMessages.add(message); - } - } - return poppedMessages; - } - - @Override - public boolean containsMessage(String queueName, String messageId) { - return getWithRetriedTransactions(tx -> existsMessage(tx, queueName, messageId)); - } - - private void createQueueIfNotExists(Connection connection, String queueName) { - logger.trace("Creating new queue '{}'", queueName); - final String EXISTS_QUEUE = - "SELECT EXISTS(SELECT 1 FROM queue WHERE queue_name = ?) FOR SHARE"; - boolean exists = query(connection, EXISTS_QUEUE, q -> q.addParameter(queueName).exists()); - if (!exists) { - final String CREATE_QUEUE = - "INSERT INTO queue (queue_name) VALUES (?) 
ON CONFLICT (queue_name) DO NOTHING"; - execute(connection, CREATE_QUEUE, q -> q.addParameter(queueName).executeUpdate()); - } - } - - private class QueueMessage { - public String queueName; - public String messageId; - } -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ExecuteFunction.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ExecuteFunction.java deleted file mode 100644 index 97bc85bec1..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ExecuteFunction.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
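The queue DAO deleted above relies on a periodic un-ack pass: popped messages whose visibility window has lapsed are locked with FOR UPDATE SKIP LOCKED (so concurrent sweepers skip rather than block each other) and flipped back to popped = false for redelivery. A simplified sketch, assuming an open transaction (autocommit off, so the row locks persist until commit), the queue_message table from this module's schema, and the same 60-second timeout; the deleted code batched the UPDATE per queue rather than per message:

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.List;

public class UnackSweepSketch {

    // Assumes the caller opened a transaction (autocommit off), so the row locks
    // taken by FOR UPDATE SKIP LOCKED are held until the updates commit.
    public int unpopTimedOut(Connection c) throws SQLException {
        List<String> ids = new ArrayList<>();
        try (PreparedStatement lock = c.prepareStatement(
                        "SELECT message_id FROM queue_message WHERE popped = true "
                                + "AND (deliver_on + interval '60 seconds') < current_timestamp "
                                + "LIMIT 1000 FOR UPDATE SKIP LOCKED");
                ResultSet rs = lock.executeQuery()) {
            while (rs.next()) {
                ids.add(rs.getString(1));
            }
        }
        int unacked = 0;
        // One UPDATE per message keeps the sketch short; batching per queue,
        // as the deleted code did, saves round trips.
        try (PreparedStatement unpop = c.prepareStatement(
                "UPDATE queue_message SET popped = false WHERE message_id = ?")) {
            for (String id : ids) {
                unpop.setString(1, id);
                unacked += unpop.executeUpdate();
            }
        }
        return unacked;
    }
}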
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.util; - -import java.sql.SQLException; - -/** - * Functional interface for {@link Query} executions with no expected result. - * - * @author mustafa - */ -@FunctionalInterface -public interface ExecuteFunction { - - void apply(Query query) throws SQLException; -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/LazyToString.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/LazyToString.java deleted file mode 100644 index 9f98e770d0..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/LazyToString.java +++ /dev/null @@ -1,33 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.util; - -import java.util.function.Supplier; - -/** Functional class to support the lazy execution of a String result. */ -public class LazyToString { - - private final Supplier supplier; - - /** - * @param supplier Supplier to execute when {@link #toString()} is called. - */ - public LazyToString(Supplier supplier) { - this.supplier = supplier; - } - - @Override - public String toString() { - return supplier.get(); - } -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/Query.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/Query.java deleted file mode 100644 index 935eb99ecd..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/Query.java +++ /dev/null @@ -1,628 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
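LazyToString, deleted above, defers an expensive String computation until toString() is invoked, which pairs naturally with SLF4J's {} placeholders: the supplier never runs when the log level is disabled. A small usage sketch; the logger and the summarize() call are illustrative assumptions:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class LazyLoggingSketch {

    private static final Logger logger = LoggerFactory.getLogger(LazyLoggingSketch.class);

    void logLazily(Object payload) {
        // SLF4J only calls toString() on the argument when DEBUG is enabled,
        // so summarize() is skipped entirely at higher log levels.
        logger.debug("payload: {}", new LazyToString(() -> summarize(payload)));
    }

    private String summarize(Object payload) {
        return String.valueOf(payload); // stand-in for an expensive serialization
    }
}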
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.util; - -import java.io.IOException; -import java.sql.Connection; -import java.sql.Date; -import java.sql.PreparedStatement; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.Collection; -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; - -import org.apache.commons.lang3.math.NumberUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.core.exception.ApplicationException.Code; - -import com.fasterxml.jackson.core.JsonProcessingException; -import com.fasterxml.jackson.databind.ObjectMapper; - -/** - * Represents a {@link PreparedStatement} that is wrapped with convenience methods and utilities. - * - *
This class simulates a parameter building pattern and all {@literal addParameter(*)} methods - * must be called in the proper order of their expected binding sequence. - * - * @author mustafa - */ -public class Query implements AutoCloseable { - - private final Logger logger = LoggerFactory.getLogger(getClass()); - - /** The {@link ObjectMapper} instance to use for serializing/deserializing JSON. */ - protected final ObjectMapper objectMapper; - - /** The initial supplied query String that was used to prepare {@link #statement}. */ - private final String rawQuery; - - /** - * Parameter index for the {@code ResultSet#set*(*)} methods, gets incremented every time a - * parameter is added to the {@code PreparedStatement} {@link #statement}. - */ - private final AtomicInteger index = new AtomicInteger(1); - - /** The {@link PreparedStatement} that will be managed and executed by this class. */ - private final PreparedStatement statement; - - public Query(ObjectMapper objectMapper, Connection connection, String query) { - this.rawQuery = query; - this.objectMapper = objectMapper; - - try { - this.statement = connection.prepareStatement(query); - } catch (SQLException ex) { - throw new ApplicationException( - Code.BACKEND_ERROR, - "Cannot prepare statement for query: " + ex.getMessage(), - ex); - } - } - - /** - * Generate a String with {@literal count} number of '?' placeholders for {@link - * PreparedStatement} queries. - * - * @param count The number of '?' chars to generate. - * @return a comma delimited string of {@literal count} '?' binding placeholders. - */ - public static String generateInBindings(int count) { - String[] questions = new String[count]; - for (int i = 0; i < count; i++) { - questions[i] = "?"; - } - - return String.join(", ", questions); - } - - public Query addParameter(final String value) { - return addParameterInternal((ps, idx) -> ps.setString(idx, value)); - } - - public Query addParameter(final int value) { - return addParameterInternal((ps, idx) -> ps.setInt(idx, value)); - } - - public Query addParameter(final boolean value) { - return addParameterInternal(((ps, idx) -> ps.setBoolean(idx, value))); - } - - public Query addParameter(final long value) { - return addParameterInternal((ps, idx) -> ps.setLong(idx, value)); - } - - public Query addParameter(final double value) { - return addParameterInternal((ps, idx) -> ps.setDouble(idx, value)); - } - - public Query addParameter(Date date) { - return addParameterInternal((ps, idx) -> ps.setDate(idx, date)); - } - - public Query addParameter(Timestamp timestamp) { - return addParameterInternal((ps, idx) -> ps.setTimestamp(idx, timestamp)); - } - - /** - * Serializes {@literal value} to a JSON string for persistence. - * - * @param value The value to serialize. - * @return {@literal this} - */ - public Query addJsonParameter(Object value) { - return addParameter(toJson(value)); - } - - /** - * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link Date}. - * - * @param date The {@literal java.util.Date} to bind. - * @return {@literal this} - */ - public Query addDateParameter(java.util.Date date) { - return addParameter(new Date(date.getTime())); - } - - /** - * Bind the given {@link java.util.Date} to the PreparedStatement as a {@link Timestamp}. - * - * @param date The {@literal java.util.Date} to bind. 
- * @return {@literal this} - */ - public Query addTimestampParameter(java.util.Date date) { - return addParameter(new Timestamp(date.getTime())); - } - - /** - * Bind the given epoch millis to the PreparedStatement as a {@link Timestamp}. - * - * @param epochMillis The epoch ms to create a new {@literal Timestamp} from. - * @return {@literal this} - */ - public Query addTimestampParameter(long epochMillis) { - return addParameter(new Timestamp(epochMillis)); - } - - /** - * Add a collection of primitive values at once, in the order of the collection. - * - * @param values The values to bind to the prepared statement. - * @return {@literal this} - * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered in the - * collection. - * @see #addParameters(Object...) - */ - public Query addParameters(Collection values) { - return addParameters(values.toArray()); - } - - /** - * Add many primitive values at once. - * - * @param values The values to bind to the prepared statement. - * @return {@literal this} - * @throws IllegalArgumentException If a non-primitive/unsupported type is encountered. - */ - public Query addParameters(Object... values) { - for (Object v : values) { - if (v instanceof String) { - addParameter((String) v); - } else if (v instanceof Integer) { - addParameter((Integer) v); - } else if (v instanceof Long) { - addParameter((Long) v); - } else if (v instanceof Double) { - addParameter((Double) v); - } else if (v instanceof Boolean) { - addParameter((Boolean) v); - } else if (v instanceof Date) { - addParameter((Date) v); - } else if (v instanceof Timestamp) { - addParameter((Timestamp) v); - } else { - throw new IllegalArgumentException( - "Type " - + v.getClass().getName() - + " is not supported by automatic property assignment"); - } - } - - return this; - } - - /** - * Utility method for evaluating the prepared statement as a query to check the existence of a - * record using a numeric count or boolean return value. - * - *
The {@link #rawQuery} provided must result in a {@link Number} or {@link Boolean} result. - * - * @return {@literal true} If a count query returned more than 0 or an exists query returns - * {@literal true}. - * @throws ApplicationException If an unexpected return type cannot be evaluated to a {@code - * Boolean} result. - */ - public boolean exists() { - Object val = executeScalar(); - if (null == val) { - return false; - } - - if (val instanceof Number) { - return convertLong(val) > 0; - } - - if (val instanceof Boolean) { - return (Boolean) val; - } - - if (val instanceof String) { - return convertBoolean(val); - } - - throw new ApplicationException( - Code.BACKEND_ERROR, - "Expected a Numeric or Boolean scalar return value from the query, received " - + val.getClass().getName()); - } - - /** - * Convenience method for executing delete statements. - * - * @return {@literal true} if the statement affected 1 or more rows. - * @see #executeUpdate() - */ - public boolean executeDelete() { - int count = executeUpdate(); - if (count > 1) { - logger.trace("Removed {} row(s) for query {}", count, rawQuery); - } - - return count > 0; - } - - /** - * Convenience method for executing statements that return a single numeric value, typically - * {@literal SELECT COUNT...} style queries. - * - * @return The result of the query as a {@literal long}. - */ - public long executeCount() { - return executeScalar(Long.class); - } - - /** - * @return The result of {@link PreparedStatement#executeUpdate()} - */ - public int executeUpdate() { - try { - - Long start = null; - if (logger.isTraceEnabled()) { - start = System.currentTimeMillis(); - } - - final int val = this.statement.executeUpdate(); - - if (null != start && logger.isTraceEnabled()) { - long end = System.currentTimeMillis(); - logger.trace("[{}ms] {}: {}", (end - start), val, rawQuery); - } - - return val; - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex.getMessage(), ex); - } - } - - /** - * Execute a query from the PreparedStatement and return the ResultSet. - * - *
NOTE: The returned ResultSet must be closed/managed by the calling methods. - * - * @return {@link PreparedStatement#executeQuery()} - * @throws ApplicationException If any SQL errors occur. - */ - public ResultSet executeQuery() { - Long start = null; - if (logger.isTraceEnabled()) { - start = System.currentTimeMillis(); - } - - try { - return this.statement.executeQuery(); - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } finally { - if (null != start && logger.isTraceEnabled()) { - long end = System.currentTimeMillis(); - logger.trace("[{}ms] {}", (end - start), rawQuery); - } - } - } - - /** - * @return The single result of the query as an Object. - */ - public Object executeScalar() { - try (ResultSet rs = executeQuery()) { - if (!rs.next()) { - return null; - } - return rs.getObject(1); - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - /** - * Execute the PreparedStatement and return a single 'primitive' value from the ResultSet. - * - * @param returnType The type to return. - * @param The type parameter to return a List of. - * @return A single result from the execution of the statement, as a type of {@literal - * returnType}. - * @throws ApplicationException {@literal returnType} is unsupported, cannot be cast to from the - * result, or any SQL errors occur. - */ - public V executeScalar(Class returnType) { - try (ResultSet rs = executeQuery()) { - if (!rs.next()) { - Object value = null; - if (Integer.class == returnType) { - value = 0; - } else if (Long.class == returnType) { - value = 0L; - } else if (Boolean.class == returnType) { - value = false; - } - return returnType.cast(value); - } else { - return getScalarFromResultSet(rs, returnType); - } - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - /** - * Execute the PreparedStatement and return a List of 'primitive' values from the ResultSet. - * - * @param returnType The type Class return a List of. - * @param The type parameter to return a List of. - * @return A {@code List}. - * @throws ApplicationException {@literal returnType} is unsupported, cannot be cast to from the - * result, or any SQL errors occur. - */ - public List executeScalarList(Class returnType) { - try (ResultSet rs = executeQuery()) { - List values = new ArrayList<>(); - while (rs.next()) { - values.add(getScalarFromResultSet(rs, returnType)); - } - return values; - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - /** - * Execute the statement and return only the first record from the result set. - * - * @param returnType The Class to return. - * @param The type parameter. - * @return An instance of {@literal } from the result set. - */ - public V executeAndFetchFirst(Class returnType) { - Object o = executeScalar(); - if (null == o) { - return null; - } - return convert(o, returnType); - } - - /** - * Execute the PreparedStatement and return a List of {@literal returnType} values from the - * ResultSet. - * - * @param returnType The type Class return a List of. - * @param The type parameter to return a List of. - * @return A {@code List}. - * @throws ApplicationException {@literal returnType} is unsupported, cannot be cast to from the - * result, or any SQL errors occur. 
- */ - public List executeAndFetch(Class returnType) { - try (ResultSet rs = executeQuery()) { - List list = new ArrayList<>(); - while (rs.next()) { - list.add(convert(rs.getObject(1), returnType)); - } - return list; - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - /** - * Execute the query and pass the {@link ResultSet} to the given handler. - * - * @param handler The {@link ResultSetHandler} to execute. - * @param The return type of this method. - * @return The results of {@link ResultSetHandler#apply(ResultSet)}. - */ - public V executeAndFetch(ResultSetHandler handler) { - try (ResultSet rs = executeQuery()) { - return handler.apply(rs); - } catch (SQLException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - @Override - public void close() { - try { - if (null != statement && !statement.isClosed()) { - statement.close(); - } - } catch (SQLException ex) { - logger.warn("Error closing prepared statement: {}", ex.getMessage()); - } - } - - protected final Query addParameterInternal(InternalParameterSetter setter) { - int index = getAndIncrementIndex(); - try { - setter.apply(this.statement, index); - return this; - } catch (SQLException ex) { - throw new ApplicationException( - Code.BACKEND_ERROR, "Could not apply bind parameter at index " + index, ex); - } - } - - protected V getScalarFromResultSet(ResultSet rs, Class returnType) throws SQLException { - Object value = null; - - if (Integer.class == returnType) { - value = rs.getInt(1); - } else if (Long.class == returnType) { - value = rs.getLong(1); - } else if (String.class == returnType) { - value = rs.getString(1); - } else if (Boolean.class == returnType) { - value = rs.getBoolean(1); - } else if (Double.class == returnType) { - value = rs.getDouble(1); - } else if (Date.class == returnType) { - value = rs.getDate(1); - } else if (Timestamp.class == returnType) { - value = rs.getTimestamp(1); - } else { - value = rs.getObject(1); - } - - if (null == value) { - throw new NullPointerException( - "Cannot get value from ResultSet of type " + returnType.getName()); - } - - return returnType.cast(value); - } - - protected V convert(Object value, Class returnType) { - if (Boolean.class == returnType) { - return returnType.cast(convertBoolean(value)); - } else if (Integer.class == returnType) { - return returnType.cast(convertInt(value)); - } else if (Long.class == returnType) { - return returnType.cast(convertLong(value)); - } else if (Double.class == returnType) { - return returnType.cast(convertDouble(value)); - } else if (String.class == returnType) { - return returnType.cast(convertString(value)); - } else if (value instanceof String) { - return fromJson((String) value, returnType); - } - - final String vName = value.getClass().getName(); - final String rName = returnType.getName(); - throw new ApplicationException( - Code.BACKEND_ERROR, "Cannot convert type " + vName + " to " + rName); - } - - protected Integer convertInt(Object value) { - if (null == value) { - return null; - } - - if (value instanceof Integer) { - return (Integer) value; - } - - if (value instanceof Number) { - return ((Number) value).intValue(); - } - - return NumberUtils.toInt(value.toString()); - } - - protected Double convertDouble(Object value) { - if (null == value) { - return null; - } - - if (value instanceof Double) { - return (Double) value; - } - - if (value instanceof Number) { - return ((Number) value).doubleValue(); - } - - return NumberUtils.toDouble(value.toString()); - 
} - - protected Long convertLong(Object value) { - if (null == value) { - return null; - } - - if (value instanceof Long) { - return (Long) value; - } - - if (value instanceof Number) { - return ((Number) value).longValue(); - } - return NumberUtils.toLong(value.toString()); - } - - protected String convertString(Object value) { - if (null == value) { - return null; - } - - if (value instanceof String) { - return (String) value; - } - - return value.toString().trim(); - } - - protected Boolean convertBoolean(Object value) { - if (null == value) { - return null; - } - - if (value instanceof Boolean) { - return (Boolean) value; - } - - if (value instanceof Number) { - return ((Number) value).intValue() != 0; - } - - String text = value.toString().trim(); - return "Y".equalsIgnoreCase(text) - || "YES".equalsIgnoreCase(text) - || "TRUE".equalsIgnoreCase(text) - || "T".equalsIgnoreCase(text) - || "1".equalsIgnoreCase(text); - } - - protected String toJson(Object value) { - if (null == value) { - return null; - } - - try { - return objectMapper.writeValueAsString(value); - } catch (JsonProcessingException ex) { - throw new ApplicationException(Code.BACKEND_ERROR, ex); - } - } - - protected V fromJson(String value, Class returnType) { - if (null == value) { - return null; - } - - try { - return objectMapper.readValue(value, returnType); - } catch (IOException ex) { - throw new ApplicationException( - Code.BACKEND_ERROR, - "Could not convert JSON '" + value + "' to " + returnType.getName(), - ex); - } - } - - protected final int getIndex() { - return index.get(); - } - - protected final int getAndIncrementIndex() { - return index.getAndIncrement(); - } - - @FunctionalInterface - private interface InternalParameterSetter { - - void apply(PreparedStatement ps, int idx) throws SQLException; - } -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueryFunction.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueryFunction.java deleted file mode 100644 index fd9a4f658e..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/QueryFunction.java +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
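The Query wrapper deleted above binds parameters strictly in the order the addParameter(*) calls are made, and generateInBindings expands a dynamic IN clause into the matching run of '?' placeholders. A short usage sketch against that API as defined above; the task table and json_data column come from this module's schema:

import java.sql.Connection;
import java.util.List;

import com.fasterxml.jackson.databind.ObjectMapper;

public class QueryUsageSketch {

    List<String> fetchJson(ObjectMapper objectMapper, Connection connection, List<String> taskIds) {
        String sql = String.format(
                "SELECT json_data FROM task WHERE task_id IN (%s)",
                Query.generateInBindings(taskIds.size())); // e.g. "?, ?, ?"
        try (Query q = new Query(objectMapper, connection, sql)) {
            // addParameters binds in collection order, matching the '?' sequence.
            return q.addParameters(taskIds).executeScalarList(String.class);
        }
    }
}

Query implements AutoCloseable, so try-with-resources closes the underlying PreparedStatement.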
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.util; - -import java.sql.SQLException; - -/** - * Functional interface for {@link Query} executions that return results. - * - * @author mustafa - */ -@FunctionalInterface -public interface QueryFunction<R> { - - R apply(Query query) throws SQLException; -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ResultSetHandler.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ResultSetHandler.java deleted file mode 100644 index b823dfecc2..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/ResultSetHandler.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
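QueryFunction is the lambda shape a DAO hands to a helper that owns the whole query lifecycle. A hypothetical consumer (the method name and transaction handling are assumptions for illustration, not part of this patch):

    import java.sql.Connection;
    import java.sql.SQLException;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class QueryFunctionSketch {
        // Opens the Query, delegates the interesting part to the lambda,
        // and guarantees cleanup. Name and signature are illustrative.
        static <R> R withQuery(ObjectMapper om, Connection conn, String sql, QueryFunction<R> fn)
                throws SQLException {
            try (Query q = new Query(om, conn, sql)) {
                return fn.apply(q);
            }
        }
        // e.g. withQuery(om, conn, SQL, q -> q.addParameter(name).executeCount());
    }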
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.util; - -import java.sql.ResultSet; -import java.sql.SQLException; - -/** - * Functional interface for {@link Query#executeAndFetch(ResultSetHandler)}. - * - * @author mustafa - */ -@FunctionalInterface -public interface ResultSetHandler<R> { - - R apply(ResultSet resultSet) throws SQLException; -} diff --git a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/TransactionalFunction.java b/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/TransactionalFunction.java deleted file mode 100644 index 0d08c69ac2..0000000000 --- a/postgres-persistence/src/main/java/com/netflix/conductor/postgres/util/TransactionalFunction.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
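ResultSetHandler backs the second executeAndFetch overload above and is the escape hatch when a result set is not a single scalar column. A minimal sketch (the SQL and mapping are illustrative):

    import java.sql.Connection;
    import java.util.ArrayList;
    import java.util.List;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class ResultSetHandlerSketch {
        static List<String> messageIds(ObjectMapper om, Connection conn, String queueName) {
            final String SQL = "SELECT message_id FROM queue_message WHERE queue_name = ?";
            try (Query q = new Query(om, conn, SQL)) {
                // The lambda is a ResultSetHandler<List<String>>; any SQLException it
                // throws is rethrown as ApplicationException(BACKEND_ERROR).
                return q.addParameter(queueName).executeAndFetch(rs -> {
                    List<String> ids = new ArrayList<>();
                    while (rs.next()) {
                        ids.add(rs.getString("message_id"));
                    }
                    return ids;
                });
            }
        }
    }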
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.util; - -import java.sql.Connection; -import java.sql.SQLException; - -/** - * Functional interface for operations within a transactional context. - * - * @author mustafa - */ -@FunctionalInterface -public interface TransactionalFunction<R> { - - R apply(Connection tx) throws SQLException; -} diff --git a/postgres-persistence/src/main/resources/db/migration_postgres/V1__initial_schema.sql b/postgres-persistence/src/main/resources/db/migration_postgres/V1__initial_schema.sql deleted file mode 100644 index a76611b27d..0000000000 --- a/postgres-persistence/src/main/resources/db/migration_postgres/V1__initial_schema.sql +++ /dev/null @@ -1,173 +0,0 @@ - --- -------------------------------------------------------------------------------------------------------------- --- SCHEMA FOR METADATA DAO --- -------------------------------------------------------------------------------------------------------------- - -CREATE TABLE meta_event_handler ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - name varchar(255) NOT NULL, - event varchar(255) NOT NULL, - active boolean NOT NULL, - json_data TEXT NOT NULL, - PRIMARY KEY (id) -); -CREATE INDEX event_handler_name_index ON meta_event_handler (name); -CREATE INDEX event_handler_event_index ON meta_event_handler (event); - -CREATE TABLE meta_task_def ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - name varchar(255) NOT NULL, - json_data TEXT NOT NULL, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_task_def_name ON meta_task_def (name); - -CREATE TABLE meta_workflow_def ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - name varchar(255) NOT NULL, - version int NOT NULL, - latest_version int NOT NULL DEFAULT 0, - json_data TEXT NOT NULL, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_name_version ON meta_workflow_def (name,version); -CREATE INDEX workflow_def_name_index ON meta_workflow_def (name); - --- -------------------------------------------------------------------------------------------------------------- --- SCHEMA FOR EXECUTION DAO --- -------------------------------------------------------------------------------------------------------------- - -CREATE TABLE event_execution ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - event_handler_name varchar(255) NOT NULL, - event_name varchar(255) NOT NULL, - message_id varchar(255) NOT NULL, - execution_id varchar(255) NOT NULL, - json_data TEXT NOT NULL, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_event_execution ON event_execution (event_handler_name,event_name,message_id); - -CREATE TABLE poll_data ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - queue_name varchar(255) NOT NULL, - domain varchar(255) NOT NULL, - json_data TEXT NOT NULL, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_poll_data ON poll_data (queue_name,domain); -CREATE INDEX ON poll_data (queue_name); - -CREATE TABLE 
task_scheduled ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - workflow_id varchar(255) NOT NULL, - task_key varchar(255) NOT NULL, - task_id varchar(255) NOT NULL, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_workflow_id_task_key ON task_scheduled (workflow_id,task_key); - -CREATE TABLE task_in_progress ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - task_def_name varchar(255) NOT NULL, - task_id varchar(255) NOT NULL, - workflow_id varchar(255) NOT NULL, - in_progress_status boolean NOT NULL DEFAULT false, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_task_def_task_id1 ON task_in_progress (task_def_name,task_id); - -CREATE TABLE task ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - task_id varchar(255) NOT NULL, - json_data TEXT NOT NULL, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_task_id ON task (task_id); - -CREATE TABLE workflow ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - workflow_id varchar(255) NOT NULL, - correlation_id varchar(255), - json_data TEXT NOT NULL, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_workflow_id ON workflow (workflow_id); - -CREATE TABLE workflow_def_to_workflow ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - workflow_def varchar(255) NOT NULL, - date_str varchar(60), - workflow_id varchar(255) NOT NULL, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_workflow_def_date_str ON workflow_def_to_workflow (workflow_def,date_str,workflow_id); - -CREATE TABLE workflow_pending ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - workflow_type varchar(255) NOT NULL, - workflow_id varchar(255) NOT NULL, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_workflow_type_workflow_id ON workflow_pending (workflow_type,workflow_id); -CREATE INDEX workflow_type_index ON workflow_pending (workflow_type); - -CREATE TABLE workflow_to_task ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - modified_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - workflow_id varchar(255) NOT NULL, - task_id varchar(255) NOT NULL, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_workflow_to_task_id ON workflow_to_task (workflow_id,task_id); -CREATE INDEX workflow_id_index ON workflow_to_task (workflow_id); - --- -------------------------------------------------------------------------------------------------------------- --- SCHEMA FOR QUEUE DAO --- -------------------------------------------------------------------------------------------------------------- - -CREATE TABLE queue ( - id SERIAL, - created_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - queue_name varchar(255) NOT NULL, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_queue_name ON queue (queue_name); - -CREATE TABLE queue_message ( - id SERIAL, - created_on TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP, - deliver_on TIMESTAMP DEFAULT CURRENT_TIMESTAMP, - queue_name varchar(255) NOT NULL, - message_id varchar(255) NOT NULL, - priority integer DEFAULT 0, - popped boolean DEFAULT false, - offset_time_seconds BIGINT, - payload TEXT, - PRIMARY KEY (id) -); -CREATE UNIQUE INDEX unique_queue_name_message_id ON queue_message (queue_name,message_id); -CREATE INDEX combo_queue_message ON 
queue_message (queue_name,popped,deliver_on,created_on); diff --git a/postgres-persistence/src/main/resources/db/migration_postgres/V2__1009_Fix_PostgresExecutionDAO_Index.sql b/postgres-persistence/src/main/resources/db/migration_postgres/V2__1009_Fix_PostgresExecutionDAO_Index.sql deleted file mode 100644 index 03b132ab0d..0000000000 --- a/postgres-persistence/src/main/resources/db/migration_postgres/V2__1009_Fix_PostgresExecutionDAO_Index.sql +++ /dev/null @@ -1,3 +0,0 @@ -DROP INDEX IF EXISTS unique_event_execution; - -CREATE UNIQUE INDEX unique_event_execution ON event_execution (event_handler_name,event_name,execution_id); \ No newline at end of file diff --git a/postgres-persistence/src/main/resources/db/migration_postgres/V3__correlation_id_index.sql b/postgres-persistence/src/main/resources/db/migration_postgres/V3__correlation_id_index.sql deleted file mode 100644 index 9ced890da0..0000000000 --- a/postgres-persistence/src/main/resources/db/migration_postgres/V3__correlation_id_index.sql +++ /dev/null @@ -1,3 +0,0 @@ -DROP INDEX IF EXISTS workflow_corr_id_index; - -CREATE INDEX workflow_corr_id_index ON workflow (correlation_id); \ No newline at end of file diff --git a/postgres-persistence/src/main/resources/db/migration_postgres/V4__new_qm_index_with_priority.sql b/postgres-persistence/src/main/resources/db/migration_postgres/V4__new_qm_index_with_priority.sql deleted file mode 100644 index 23d12a37c2..0000000000 --- a/postgres-persistence/src/main/resources/db/migration_postgres/V4__new_qm_index_with_priority.sql +++ /dev/null @@ -1,3 +0,0 @@ -DROP INDEX IF EXISTS combo_queue_message; - -CREATE INDEX combo_queue_message ON queue_message (queue_name,priority,popped,deliver_on,created_on); \ No newline at end of file diff --git a/postgres-persistence/src/main/resources/db/migration_postgres/V5__new_queue_message_pk.sql b/postgres-persistence/src/main/resources/db/migration_postgres/V5__new_queue_message_pk.sql deleted file mode 100644 index 6fefa6019f..0000000000 --- a/postgres-persistence/src/main/resources/db/migration_postgres/V5__new_queue_message_pk.sql +++ /dev/null @@ -1,11 +0,0 @@ --- no longer need separate index if pk is queue_name, message_id -DROP INDEX IF EXISTS unique_queue_name_message_id; - --- remove id primary key -ALTER TABLE queue_message DROP CONSTRAINT IF EXISTS queue_message_pkey; - --- remove id column -ALTER TABLE queue_message DROP COLUMN IF EXISTS id; - --- set primary key to queue_name, message_id -ALTER TABLE queue_message ADD PRIMARY KEY (queue_name, message_id); diff --git a/postgres-persistence/src/main/resources/db/migration_postgres/V6__update_pk.sql b/postgres-persistence/src/main/resources/db/migration_postgres/V6__update_pk.sql deleted file mode 100644 index 24613543bf..0000000000 --- a/postgres-persistence/src/main/resources/db/migration_postgres/V6__update_pk.sql +++ /dev/null @@ -1,77 +0,0 @@ --- 1) queue_message -DROP INDEX IF EXISTS unique_queue_name_message_id; -ALTER TABLE queue_message DROP CONSTRAINT IF EXISTS queue_message_pkey; -ALTER TABLE queue_message DROP COLUMN IF EXISTS id; -ALTER TABLE queue_message ADD PRIMARY KEY (queue_name, message_id); - --- 2) queue -DROP INDEX IF EXISTS unique_queue_name; -ALTER TABLE queue DROP CONSTRAINT IF EXISTS queue_pkey; -ALTER TABLE queue DROP COLUMN IF EXISTS id; -ALTER TABLE queue ADD PRIMARY KEY (queue_name); - --- 3) workflow_to_task -DROP INDEX IF EXISTS unique_workflow_to_task_id; -ALTER TABLE workflow_to_task DROP CONSTRAINT IF EXISTS workflow_to_task_pkey; -ALTER TABLE workflow_to_task 
DROP COLUMN IF EXISTS id; -ALTER TABLE workflow_to_task ADD PRIMARY KEY (workflow_id, task_id); - --- 4) workflow_pending -DROP INDEX IF EXISTS unique_workflow_type_workflow_id; -ALTER TABLE workflow_pending DROP CONSTRAINT IF EXISTS workflow_pending_pkey; -ALTER TABLE workflow_pending DROP COLUMN IF EXISTS id; -ALTER TABLE workflow_pending ADD PRIMARY KEY (workflow_type, workflow_id); - --- 5) workflow_def_to_workflow -DROP INDEX IF EXISTS unique_workflow_def_date_str; -ALTER TABLE workflow_def_to_workflow DROP CONSTRAINT IF EXISTS workflow_def_to_workflow_pkey; -ALTER TABLE workflow_def_to_workflow DROP COLUMN IF EXISTS id; -ALTER TABLE workflow_def_to_workflow ADD PRIMARY KEY (workflow_def, date_str, workflow_id); - --- 6) workflow -DROP INDEX IF EXISTS unique_workflow_id; -ALTER TABLE workflow DROP CONSTRAINT IF EXISTS workflow_pkey; -ALTER TABLE workflow DROP COLUMN IF EXISTS id; -ALTER TABLE workflow ADD PRIMARY KEY (workflow_id); - --- 7) task -DROP INDEX IF EXISTS unique_task_id; -ALTER TABLE task DROP CONSTRAINT IF EXISTS task_pkey; -ALTER TABLE task DROP COLUMN IF EXISTS id; -ALTER TABLE task ADD PRIMARY KEY (task_id); - --- 8) task_in_progress -DROP INDEX IF EXISTS unique_task_def_task_id1; -ALTER TABLE task_in_progress DROP CONSTRAINT IF EXISTS task_in_progress_pkey; -ALTER TABLE task_in_progress DROP COLUMN IF EXISTS id; -ALTER TABLE task_in_progress ADD PRIMARY KEY (task_def_name, task_id); - --- 9) task_scheduled -DROP INDEX IF EXISTS unique_workflow_id_task_key; -ALTER TABLE task_scheduled DROP CONSTRAINT IF EXISTS task_scheduled_pkey; -ALTER TABLE task_scheduled DROP COLUMN IF EXISTS id; -ALTER TABLE task_scheduled ADD PRIMARY KEY (workflow_id, task_key); - --- 10) poll_data -DROP INDEX IF EXISTS unique_poll_data; -ALTER TABLE poll_data DROP CONSTRAINT IF EXISTS poll_data_pkey; -ALTER TABLE poll_data DROP COLUMN IF EXISTS id; -ALTER TABLE poll_data ADD PRIMARY KEY (queue_name, domain); - --- 11) event_execution -DROP INDEX IF EXISTS unique_event_execution; -ALTER TABLE event_execution DROP CONSTRAINT IF EXISTS event_execution_pkey; -ALTER TABLE event_execution DROP COLUMN IF EXISTS id; -ALTER TABLE event_execution ADD PRIMARY KEY (event_handler_name, event_name, execution_id); - --- 12) meta_workflow_def -DROP INDEX IF EXISTS unique_name_version; -ALTER TABLE meta_workflow_def DROP CONSTRAINT IF EXISTS meta_workflow_def_pkey; -ALTER TABLE meta_workflow_def DROP COLUMN IF EXISTS id; -ALTER TABLE meta_workflow_def ADD PRIMARY KEY (name, version); - --- 13) meta_task_def -DROP INDEX IF EXISTS unique_task_def_name; -ALTER TABLE meta_task_def DROP CONSTRAINT IF EXISTS meta_task_def_pkey; -ALTER TABLE meta_task_def DROP COLUMN IF EXISTS id; -ALTER TABLE meta_task_def ADD PRIMARY KEY (name); diff --git a/postgres-persistence/src/main/resources/db/migration_postgres/V7__new_qm_index_desc_priority.sql b/postgres-persistence/src/main/resources/db/migration_postgres/V7__new_qm_index_desc_priority.sql deleted file mode 100644 index 149dcc4c54..0000000000 --- a/postgres-persistence/src/main/resources/db/migration_postgres/V7__new_qm_index_desc_priority.sql +++ /dev/null @@ -1,3 +0,0 @@ -DROP INDEX IF EXISTS combo_queue_message; - -CREATE INDEX combo_queue_message ON queue_message USING btree (queue_name , priority desc, popped, deliver_on, created_on) \ No newline at end of file diff --git a/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAOTest.java 
b/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAOTest.java deleted file mode 100644 index 3fb12ab8c9..0000000000 --- a/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresExecutionDAOTest.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
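One design thread worth noting in the migrations deleted above: V1 gives every table a SERIAL surrogate id, V5/V6 drop those ids in favor of natural composite primary keys, and V7 rebuilds combo_queue_message with priority descending so that polls can walk a queue's messages in priority order. The patch does not show the DAO's actual poll statement, so the following is only a sketch of a query shaped to lean on that index:

    import java.sql.Connection;
    import java.util.ArrayList;
    import java.util.List;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.netflix.conductor.core.events.queue.Message;

    public class PollSketch {
        static List<Message> peekDeliverable(ObjectMapper om, Connection conn, String queueName) {
            // Column order mirrors the V7 index
            // (queue_name, priority desc, popped, deliver_on, created_on).
            final String SQL =
                    "SELECT message_id, payload FROM queue_message "
                            + "WHERE queue_name = ? AND popped = false "
                            + "AND deliver_on <= CURRENT_TIMESTAMP "
                            + "ORDER BY priority DESC, deliver_on, created_on";
            try (Query q = new Query(om, conn, SQL)) {
                return q.addParameter(queueName).executeAndFetch(rs -> {
                    List<Message> out = new ArrayList<>();
                    while (rs.next()) {
                        out.add(new Message(rs.getString("message_id"), rs.getString("payload"), null));
                    }
                    return out;
                });
            }
        }
    }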
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.dao; - -import java.util.List; - -import org.flywaydb.core.Flyway; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.conductor.common.config.TestObjectMapperConfiguration; -import com.netflix.conductor.common.metadata.workflow.WorkflowDef; -import com.netflix.conductor.dao.ExecutionDAO; -import com.netflix.conductor.dao.ExecutionDAOTest; -import com.netflix.conductor.model.WorkflowModel; -import com.netflix.conductor.postgres.config.PostgresConfiguration; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -@ContextConfiguration( - classes = { - TestObjectMapperConfiguration.class, - PostgresConfiguration.class, - FlywayAutoConfiguration.class - }) -@RunWith(SpringRunner.class) -@SpringBootTest -public class PostgresExecutionDAOTest extends ExecutionDAOTest { - - @Autowired private PostgresExecutionDAO executionDAO; - - @Autowired Flyway flyway; - - // clean the database between tests. - @Before - public void before() { - flyway.clean(); - flyway.migrate(); - } - - @Test - public void testPendingByCorrelationId() { - - WorkflowDef def = new WorkflowDef(); - def.setName("pending_count_correlation_jtest"); - - WorkflowModel workflow = createTestWorkflow(); - workflow.setWorkflowDefinition(def); - - generateWorkflows(workflow, 10); - - List bycorrelationId = - getExecutionDAO() - .getWorkflowsByCorrelationId( - "pending_count_correlation_jtest", "corr001", true); - assertNotNull(bycorrelationId); - assertEquals(10, bycorrelationId.size()); - } - - @Test - public void testRemoveWorkflow() { - WorkflowDef def = new WorkflowDef(); - def.setName("workflow"); - - WorkflowModel workflow = createTestWorkflow(); - workflow.setWorkflowDefinition(def); - - List ids = generateWorkflows(workflow, 1); - - assertEquals(1, getExecutionDAO().getPendingWorkflowCount("workflow")); - ids.forEach(wfId -> getExecutionDAO().removeWorkflow(wfId)); - assertEquals(0, getExecutionDAO().getPendingWorkflowCount("workflow")); - } - - @Override - public ExecutionDAO getExecutionDAO() { - return executionDAO; - } -} diff --git a/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAOTest.java b/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAOTest.java deleted file mode 100644 index b9a03ebd1a..0000000000 --- a/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresMetadataDAOTest.java +++ /dev/null @@ -1,289 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.dao; - -import java.util.Arrays; -import java.util.List; -import java.util.Optional; -import java.util.Set; -import java.util.UUID; -import java.util.stream.Collectors; - -import org.apache.commons.lang3.builder.EqualsBuilder; -import org.flywaydb.core.Flyway; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.conductor.common.config.TestObjectMapperConfiguration; -import com.netflix.conductor.common.metadata.events.EventHandler; -import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.common.metadata.workflow.WorkflowDef; -import com.netflix.conductor.core.exception.ApplicationException; -import com.netflix.conductor.postgres.config.PostgresConfiguration; - -import static com.netflix.conductor.core.exception.ApplicationException.Code.CONFLICT; -import static com.netflix.conductor.core.exception.ApplicationException.Code.NOT_FOUND; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertThrows; -import static org.junit.Assert.assertTrue; - -@ContextConfiguration( - classes = { - TestObjectMapperConfiguration.class, - PostgresConfiguration.class, - FlywayAutoConfiguration.class - }) -@RunWith(SpringRunner.class) -@SpringBootTest -public class PostgresMetadataDAOTest { - - @Autowired private PostgresMetadataDAO metadataDAO; - - @Rule public TestName name = new TestName(); - - @Autowired Flyway flyway; - - // clean the database between tests. 
- @Before - public void before() { - flyway.clean(); - flyway.migrate(); - } - - @Test - public void testDuplicateWorkflowDef() { - WorkflowDef def = new WorkflowDef(); - def.setName("testDuplicate"); - def.setVersion(1); - - metadataDAO.createWorkflowDef(def); - - ApplicationException applicationException = - assertThrows(ApplicationException.class, () -> metadataDAO.createWorkflowDef(def)); - assertEquals( - "Workflow with testDuplicate.1 already exists!", applicationException.getMessage()); - assertEquals(CONFLICT, applicationException.getCode()); - } - - @Test - public void testRemoveNotExistingWorkflowDef() { - ApplicationException applicationException = - assertThrows( - ApplicationException.class, () -> metadataDAO.removeWorkflowDef("test", 1)); - assertEquals( - "No such workflow definition: test version: 1", applicationException.getMessage()); - assertEquals(NOT_FOUND, applicationException.getCode()); - } - - @Test - public void testWorkflowDefOperations() { - WorkflowDef def = new WorkflowDef(); - def.setName("test"); - def.setVersion(1); - def.setDescription("description"); - def.setCreatedBy("unit_test"); - def.setCreateTime(1L); - def.setOwnerApp("ownerApp"); - def.setUpdatedBy("unit_test2"); - def.setUpdateTime(2L); - - metadataDAO.createWorkflowDef(def); - - List all = metadataDAO.getAllWorkflowDefs(); - assertNotNull(all); - assertEquals(1, all.size()); - assertEquals("test", all.get(0).getName()); - assertEquals(1, all.get(0).getVersion()); - - WorkflowDef found = metadataDAO.getWorkflowDef("test", 1).get(); - assertTrue(EqualsBuilder.reflectionEquals(def, found)); - - def.setVersion(3); - metadataDAO.createWorkflowDef(def); - - all = metadataDAO.getAllWorkflowDefs(); - assertNotNull(all); - assertEquals(2, all.size()); - assertEquals("test", all.get(0).getName()); - assertEquals(1, all.get(0).getVersion()); - - found = metadataDAO.getLatestWorkflowDef(def.getName()).get(); - assertEquals(def.getName(), found.getName()); - assertEquals(def.getVersion(), found.getVersion()); - assertEquals(3, found.getVersion()); - - all = metadataDAO.getAllLatest(); - assertNotNull(all); - assertEquals(1, all.size()); - assertEquals("test", all.get(0).getName()); - assertEquals(3, all.get(0).getVersion()); - - all = metadataDAO.getAllVersions(def.getName()); - assertNotNull(all); - assertEquals(2, all.size()); - assertEquals("test", all.get(0).getName()); - assertEquals("test", all.get(1).getName()); - assertEquals(1, all.get(0).getVersion()); - assertEquals(3, all.get(1).getVersion()); - - def.setDescription("updated"); - metadataDAO.updateWorkflowDef(def); - found = metadataDAO.getWorkflowDef(def.getName(), def.getVersion()).get(); - assertEquals(def.getDescription(), found.getDescription()); - - List allnames = metadataDAO.findAll(); - assertNotNull(allnames); - assertEquals(1, allnames.size()); - assertEquals(def.getName(), allnames.get(0)); - - def.setVersion(2); - metadataDAO.createWorkflowDef(def); - - found = metadataDAO.getLatestWorkflowDef(def.getName()).get(); - assertEquals(def.getName(), found.getName()); - assertEquals(3, found.getVersion()); - - metadataDAO.removeWorkflowDef("test", 3); - Optional deleted = metadataDAO.getWorkflowDef("test", 3); - assertFalse(deleted.isPresent()); - - found = metadataDAO.getLatestWorkflowDef(def.getName()).get(); - assertEquals(def.getName(), found.getName()); - assertEquals(2, found.getVersion()); - - metadataDAO.removeWorkflowDef("test", 1); - deleted = metadataDAO.getWorkflowDef("test", 1); - assertFalse(deleted.isPresent()); - - found = 
metadataDAO.getLatestWorkflowDef(def.getName()).get(); - assertEquals(def.getName(), found.getName()); - assertEquals(2, found.getVersion()); - } - - @Test - public void testTaskDefOperations() { - TaskDef def = new TaskDef("taskA"); - def.setDescription("description"); - def.setCreatedBy("unit_test"); - def.setCreateTime(1L); - def.setInputKeys(Arrays.asList("a", "b", "c")); - def.setOutputKeys(Arrays.asList("01", "o2")); - def.setOwnerApp("ownerApp"); - def.setRetryCount(3); - def.setRetryDelaySeconds(100); - def.setRetryLogic(TaskDef.RetryLogic.FIXED); - def.setTimeoutPolicy(TaskDef.TimeoutPolicy.ALERT_ONLY); - def.setUpdatedBy("unit_test2"); - def.setUpdateTime(2L); - - metadataDAO.createTaskDef(def); - - TaskDef found = metadataDAO.getTaskDef(def.getName()); - assertTrue(EqualsBuilder.reflectionEquals(def, found)); - - def.setDescription("updated description"); - metadataDAO.updateTaskDef(def); - found = metadataDAO.getTaskDef(def.getName()); - assertTrue(EqualsBuilder.reflectionEquals(def, found)); - assertEquals("updated description", found.getDescription()); - - for (int i = 0; i < 9; i++) { - TaskDef tdf = new TaskDef("taskA" + i); - metadataDAO.createTaskDef(tdf); - } - - List all = metadataDAO.getAllTaskDefs(); - assertNotNull(all); - assertEquals(10, all.size()); - Set allnames = all.stream().map(TaskDef::getName).collect(Collectors.toSet()); - assertEquals(10, allnames.size()); - List sorted = allnames.stream().sorted().collect(Collectors.toList()); - assertEquals(def.getName(), sorted.get(0)); - - for (int i = 0; i < 9; i++) { - assertEquals(def.getName() + i, sorted.get(i + 1)); - } - - for (int i = 0; i < 9; i++) { - metadataDAO.removeTaskDef(def.getName() + i); - } - all = metadataDAO.getAllTaskDefs(); - assertNotNull(all); - assertEquals(1, all.size()); - assertEquals(def.getName(), all.get(0).getName()); - } - - @Test - public void testRemoveNotExistingTaskDef() { - ApplicationException applicationException = - assertThrows( - ApplicationException.class, - () -> metadataDAO.removeTaskDef("test" + UUID.randomUUID().toString())); - assertEquals("No such task definition", applicationException.getMessage()); - assertEquals(NOT_FOUND, applicationException.getCode()); - } - - @Test - public void testEventHandlers() { - String event1 = "SQS::arn:account090:sqstest1"; - String event2 = "SQS::arn:account090:sqstest2"; - - EventHandler eventHandler = new EventHandler(); - eventHandler.setName(UUID.randomUUID().toString()); - eventHandler.setActive(false); - EventHandler.Action action = new EventHandler.Action(); - action.setAction(EventHandler.Action.Type.start_workflow); - action.setStart_workflow(new EventHandler.StartWorkflow()); - action.getStart_workflow().setName("workflow_x"); - eventHandler.getActions().add(action); - eventHandler.setEvent(event1); - - metadataDAO.addEventHandler(eventHandler); - List all = metadataDAO.getAllEventHandlers(); - assertNotNull(all); - assertEquals(1, all.size()); - assertEquals(eventHandler.getName(), all.get(0).getName()); - assertEquals(eventHandler.getEvent(), all.get(0).getEvent()); - - List byEvents = metadataDAO.getEventHandlersForEvent(event1, true); - assertNotNull(byEvents); - assertEquals(0, byEvents.size()); // event is marked as in-active - - eventHandler.setActive(true); - eventHandler.setEvent(event2); - metadataDAO.updateEventHandler(eventHandler); - - all = metadataDAO.getAllEventHandlers(); - assertNotNull(all); - assertEquals(1, all.size()); - - byEvents = metadataDAO.getEventHandlersForEvent(event1, true); - 
assertNotNull(byEvents); - assertEquals(0, byEvents.size()); - - byEvents = metadataDAO.getEventHandlersForEvent(event2, true); - assertNotNull(byEvents); - assertEquals(1, byEvents.size()); - } -} diff --git a/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresQueueDAOTest.java b/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresQueueDAOTest.java deleted file mode 100644 index d435d2bfb9..0000000000 --- a/postgres-persistence/src/test/java/com/netflix/conductor/postgres/dao/PostgresQueueDAOTest.java +++ /dev/null @@ -1,409 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
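A detail the event-handler test above depends on: getEventHandlersForEvent(event, true) filters on the active flag, so a handler can be stored yet invisible to dispatch until it is activated. In sketch form (handler and workflow names are illustrative):

    import com.netflix.conductor.common.metadata.events.EventHandler;
    import com.netflix.conductor.postgres.dao.PostgresMetadataDAO;

    public class EventHandlerActivationSketch {
        static void demo(PostgresMetadataDAO metadataDAO) {
            EventHandler handler = new EventHandler();
            handler.setName("sketch-handler");
            handler.setEvent("SQS::arn:account090:sqstest1");
            handler.setActive(false);
            EventHandler.Action action = new EventHandler.Action();
            action.setAction(EventHandler.Action.Type.start_workflow);
            action.setStart_workflow(new EventHandler.StartWorkflow());
            action.getStart_workflow().setName("workflow_x");
            handler.getActions().add(action);
            metadataDAO.addEventHandler(handler);

            // Stored, but filtered out while inactive:
            metadataDAO.getEventHandlersForEvent("SQS::arn:account090:sqstest1", true); // empty

            handler.setActive(true);
            metadataDAO.updateEventHandler(handler);
            // The same activeOnly lookup now returns the handler.
        }
    }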
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.dao; - -import java.sql.Connection; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import javax.sql.DataSource; - -import org.flywaydb.core.Flyway; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TestName; -import org.junit.runner.RunWith; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.beans.factory.annotation.Qualifier; -import org.springframework.boot.autoconfigure.flyway.FlywayAutoConfiguration; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.ContextConfiguration; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.conductor.common.config.TestObjectMapperConfiguration; -import com.netflix.conductor.core.events.queue.Message; -import com.netflix.conductor.postgres.config.PostgresConfiguration; -import com.netflix.conductor.postgres.util.Query; - -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.collect.ImmutableList; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -@ContextConfiguration( - classes = { - TestObjectMapperConfiguration.class, - PostgresConfiguration.class, - FlywayAutoConfiguration.class - }) -@RunWith(SpringRunner.class) -@SpringBootTest -public class PostgresQueueDAOTest { - - private static final Logger LOGGER = LoggerFactory.getLogger(PostgresQueueDAOTest.class); - - @Autowired private PostgresQueueDAO queueDAO; - - @Qualifier("dataSource") - @Autowired - private DataSource dataSource; - - @Autowired private ObjectMapper objectMapper; - - @Rule public TestName name = new TestName(); - - @Autowired Flyway flyway; - - // clean the database between tests. 
- @Before - public void before() { - flyway.clean(); - flyway.migrate(); - } - - @Test - public void complexQueueTest() { - String queueName = "TestQueue"; - long offsetTimeInSecond = 0; - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - queueDAO.push(queueName, messageId, offsetTimeInSecond); - } - int size = queueDAO.getSize(queueName); - assertEquals(10, size); - Map details = queueDAO.queuesDetail(); - assertEquals(1, details.size()); - assertEquals(10L, details.get(queueName).longValue()); - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond); - } - - List popped = queueDAO.pop(queueName, 10, 100); - assertNotNull(popped); - assertEquals(10, popped.size()); - - Map>> verbose = queueDAO.queuesDetailVerbose(); - assertEquals(1, verbose.size()); - long shardSize = verbose.get(queueName).get("a").get("size"); - long unackedSize = verbose.get(queueName).get("a").get("uacked"); - assertEquals(0, shardSize); - assertEquals(10, unackedSize); - - popped.forEach(messageId -> queueDAO.ack(queueName, messageId)); - - verbose = queueDAO.queuesDetailVerbose(); - assertEquals(1, verbose.size()); - shardSize = verbose.get(queueName).get("a").get("size"); - unackedSize = verbose.get(queueName).get("a").get("uacked"); - assertEquals(0, shardSize); - assertEquals(0, unackedSize); - - popped = queueDAO.pop(queueName, 10, 100); - assertNotNull(popped); - assertEquals(0, popped.size()); - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond); - } - size = queueDAO.getSize(queueName); - assertEquals(10, size); - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - assertTrue(queueDAO.containsMessage(queueName, messageId)); - queueDAO.remove(queueName, messageId); - } - - size = queueDAO.getSize(queueName); - assertEquals(0, size); - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - queueDAO.pushIfNotExists(queueName, messageId, offsetTimeInSecond); - } - queueDAO.flush(queueName); - size = queueDAO.getSize(queueName); - assertEquals(0, size); - } - - /** - * Test fix for https://github.com/Netflix/conductor/issues/399 - * - * @since 1.8.2-rc5 - */ - @Test - public void pollMessagesTest() { - final List messages = new ArrayList<>(); - final String queueName = "issue399_testQueue"; - final int totalSize = 10; - - for (int i = 0; i < totalSize; i++) { - String payload = "{\"id\": " + i + ", \"msg\":\"test " + i + "\"}"; - Message m = new Message("testmsg-" + i, payload, ""); - if (i % 2 == 0) { - // Set priority on message with pair id - m.setPriority(99 - i); - } - messages.add(m); - } - - // Populate the queue with our test message batch - queueDAO.push(queueName, ImmutableList.copyOf(messages)); - - // Assert that all messages were persisted and no extras are in there - assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName)); - - List zeroPoll = queueDAO.pollMessages(queueName, 0, 10_000); - assertTrue("Zero poll should be empty", zeroPoll.isEmpty()); - - final int firstPollSize = 3; - List firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 10_000); - assertNotNull("First poll was null", firstPoll); - assertFalse("First poll was empty", firstPoll.isEmpty()); - assertEquals("First poll size mismatch", firstPollSize, firstPoll.size()); - - final int secondPollSize = 4; - List secondPoll = queueDAO.pollMessages(queueName, secondPollSize, 10_000); - assertNotNull("Second poll 
was null", secondPoll); - assertFalse("Second poll was empty", secondPoll.isEmpty()); - assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size()); - - // Assert that the total queue size hasn't changed - assertEquals( - "Total queue size should have remained the same", - totalSize, - queueDAO.getSize(queueName)); - - // Assert that our un-popped messages match our expected size - final long expectedSize = totalSize - firstPollSize - secondPollSize; - try (Connection c = dataSource.getConnection()) { - String UNPOPPED = - "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false"; - try (Query q = new Query(objectMapper, c, UNPOPPED)) { - long count = q.addParameter(queueName).executeCount(); - assertEquals("Remaining queue size mismatch", expectedSize, count); - } - } catch (Exception ex) { - fail(ex.getMessage()); - } - } - - /** Test fix for https://github.com/Netflix/conductor/issues/1892 */ - @Test - public void containsMessageTest() { - String queueName = "TestQueue"; - long offsetTimeInSecond = 0; - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - queueDAO.push(queueName, messageId, offsetTimeInSecond); - } - int size = queueDAO.getSize(queueName); - assertEquals(10, size); - - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - assertTrue(queueDAO.containsMessage(queueName, messageId)); - queueDAO.remove(queueName, messageId); - } - for (int i = 0; i < 10; i++) { - String messageId = "msg" + i; - assertFalse(queueDAO.containsMessage(queueName, messageId)); - } - } - - /** - * Test fix for https://github.com/Netflix/conductor/issues/448 - * - * @since 1.8.2-rc5 - */ - @Test - public void pollDeferredMessagesTest() throws InterruptedException { - final List messages = new ArrayList<>(); - final String queueName = "issue448_testQueue"; - final int totalSize = 10; - - for (int i = 0; i < totalSize; i++) { - int offset = 0; - if (i < 5) { - offset = 0; - } else if (i == 6 || i == 7) { - // Purposefully skipping id:5 to test out of order deliveries - // Set id:6 and id:7 for a 2s delay to be picked up in the second polling batch - offset = 5; - } else { - // Set all other queue messages to have enough of a delay that they won't - // accidentally - // be picked up. 
- offset = 10_000 + i; - } - - String payload = "{\"id\": " + i + ",\"offset_time_seconds\":" + offset + "}"; - Message m = new Message("testmsg-" + i, payload, ""); - messages.add(m); - queueDAO.push(queueName, "testmsg-" + i, offset); - } - - // Assert that all messages were persisted and no extras are in there - assertEquals("Queue size mismatch", totalSize, queueDAO.getSize(queueName)); - - final int firstPollSize = 4; - List firstPoll = queueDAO.pollMessages(queueName, firstPollSize, 100); - assertNotNull("First poll was null", firstPoll); - assertFalse("First poll was empty", firstPoll.isEmpty()); - assertEquals("First poll size mismatch", firstPollSize, firstPoll.size()); - - List firstPollMessageIds = - messages.stream() - .map(Message::getId) - .collect(Collectors.toList()) - .subList(0, firstPollSize + 1); - - for (int i = 0; i < firstPollSize; i++) { - String actual = firstPoll.get(i).getId(); - assertTrue("Unexpected Id: " + actual, firstPollMessageIds.contains(actual)); - } - - final int secondPollSize = 3; - - // Sleep a bit to get the next batch of messages - LOGGER.debug("Sleeping for second poll..."); - Thread.sleep(5_000); - - // Poll for many more messages than expected - List secondPoll = queueDAO.pollMessages(queueName, secondPollSize + 10, 100); - assertNotNull("Second poll was null", secondPoll); - assertFalse("Second poll was empty", secondPoll.isEmpty()); - assertEquals("Second poll size mismatch", secondPollSize, secondPoll.size()); - - List expectedIds = Arrays.asList("testmsg-4", "testmsg-6", "testmsg-7"); - for (int i = 0; i < secondPollSize; i++) { - String actual = secondPoll.get(i).getId(); - assertTrue("Unexpected Id: " + actual, expectedIds.contains(actual)); - } - - // Assert that the total queue size hasn't changed - assertEquals( - "Total queue size should have remained the same", - totalSize, - queueDAO.getSize(queueName)); - - // Assert that our un-popped messages match our expected size - final long expectedSize = totalSize - firstPollSize - secondPollSize; - try (Connection c = dataSource.getConnection()) { - String UNPOPPED = - "SELECT COUNT(*) FROM queue_message WHERE queue_name = ? AND popped = false"; - try (Query q = new Query(objectMapper, c, UNPOPPED)) { - long count = q.addParameter(queueName).executeCount(); - assertEquals("Remaining queue size mismatch", expectedSize, count); - } - } catch (Exception ex) { - fail(ex.getMessage()); - } - } - - @Test - public void processUnacksTest() { - processUnacks( - () -> { - // Process unacks - queueDAO.processUnacks("process_unacks_test"); - }, - "process_unacks_test"); - } - - @Test - public void processAllUnacksTest() { - processUnacks( - () -> { - // Process all unacks - queueDAO.processAllUnacks(); - }, - "process_unacks_test"); - } - - private void processUnacks(Runnable unack, String queueName) { - // Count of messages in the queue(s) - final int count = 10; - // Number of messages to process acks for - final int unackedCount = 4; - // A secondary queue to make sure we don't accidentally process other queues - final String otherQueueName = "process_unacks_test_other_queue"; - - // Create testing queue with some messages (but not all) that will be popped/acked. 
- for (int i = 0; i < count; i++) { - int offset = 0; - if (i >= unackedCount) { - offset = 1_000_000; - } - - queueDAO.push(queueName, "unack-" + i, offset); - } - - // Create a second queue to make sure that unacks don't occur for it - for (int i = 0; i < count; i++) { - queueDAO.push(otherQueueName, "other-" + i, 0); - } - - // Poll for first batch of messages (should be equal to unackedCount) - List polled = queueDAO.pollMessages(queueName, 100, 10_000); - assertNotNull(polled); - assertFalse(polled.isEmpty()); - assertEquals(unackedCount, polled.size()); - - // Poll messages from the other queue so we know they don't get unacked later - queueDAO.pollMessages(otherQueueName, 100, 10_000); - - // Ack one of the polled messages - assertTrue(queueDAO.ack(queueName, "unack-1")); - - // Should have one less un-acked popped message in the queue - Long uacked = queueDAO.queuesDetailVerbose().get(queueName).get("a").get("uacked"); - assertNotNull(uacked); - assertEquals(uacked.longValue(), unackedCount - 1); - - unack.run(); - - // Check uacks for both queues after processing - Map>> details = queueDAO.queuesDetailVerbose(); - uacked = details.get(queueName).get("a").get("uacked"); - assertNotNull(uacked); - assertEquals( - "The messages that were polled should be unacked still", - uacked.longValue(), - unackedCount - 1); - - Long otherUacked = details.get(otherQueueName).get("a").get("uacked"); - assertNotNull(otherUacked); - assertEquals( - "Other queue should have all unacked messages", otherUacked.longValue(), count); - - Long size = queueDAO.queuesDetail().get(queueName); - assertNotNull(size); - assertEquals(size.longValue(), count - unackedCount); - } -} diff --git a/postgres-persistence/src/test/java/com/netflix/conductor/postgres/performance/PerformanceTest.java b/postgres-persistence/src/test/java/com/netflix/conductor/postgres/performance/PerformanceTest.java deleted file mode 100644 index 13e4507627..0000000000 --- a/postgres-persistence/src/test/java/com/netflix/conductor/postgres/performance/PerformanceTest.java +++ /dev/null @@ -1,454 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
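The queue tests above all exercise one lifecycle: push stamps deliver_on = now + offset seconds, pollMessages flips matching rows to popped, ack discards them, and processUnacks/processAllUnacks return timed-out popped-but-unacked rows to circulation. Compressed into a sketch (queue and message ids are illustrative):

    import java.util.List;
    import com.netflix.conductor.core.events.queue.Message;
    import com.netflix.conductor.postgres.dao.PostgresQueueDAO;

    public class QueueLifecycleSketch {
        static void lifecycle(PostgresQueueDAO queueDAO) {
            queueDAO.push("sketch_queue", "msg-0", 0);   // deliverable immediately
            queueDAO.push("sketch_queue", "msg-1", 30);  // hidden for ~30 seconds

            List<Message> polled = queueDAO.pollMessages("sketch_queue", 10, 1_000);
            // polled holds only msg-0; msg-1 stays invisible until deliver_on passes.

            queueDAO.ack("sketch_queue", "msg-0");       // done: the row goes away

            // Had msg-0 never been acked, a later sweep would make it pollable
            // again once its unack timeout lapsed:
            queueDAO.processUnacks("sketch_queue");
        }
    }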
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.postgres.performance; - -// SBMTODO: this test needs to be migrated -// reference - https://github.com/Netflix/conductor/pull/1940 -// @Ignore("This test cannot be automated") -// public class PerformanceTest { -// -// public static final int MSGS = 1000; -// public static final int PRODUCER_BATCH = 10; // make sure MSGS % PRODUCER_BATCH == 0 -// public static final int PRODUCERS = 4; -// public static final int WORKERS = 8; -// public static final int OBSERVERS = 4; -// public static final int OBSERVER_DELAY = 5000; -// public static final int UNACK_RUNNERS = 10; -// public static final int UNACK_DELAY = 500; -// public static final int WORKER_BATCH = 10; -// public static final int WORKER_BATCH_TIMEOUT = 500; -// public static final int COMPLETION_MONITOR_DELAY = 1000; -// -// private DataSource dataSource; -// private QueueDAO Q; -// private ExecutionDAO E; -// -// private final ExecutorService threadPool = Executors.newFixedThreadPool(PRODUCERS + WORKERS + -// OBSERVERS + UNACK_RUNNERS); -// private static final Logger LOGGER = LoggerFactory.getLogger(PerformanceTest.class); -// -// @Before -// public void setUp() { -// TestConfiguration testConfiguration = new TestConfiguration(); -// configuration = new TestPostgresConfiguration(testConfiguration, -// -// "jdbc:postgresql://localhost:54320/conductor?charset=utf8&parseTime=true&interpolateParams=true", -// 10, 2); -// PostgresDataSourceProvider dataSource = new PostgresDataSourceProvider(configuration); -// this.dataSource = dataSource.get(); -// resetAllData(this.dataSource); -// flywayMigrate(this.dataSource); -// -// final ObjectMapper objectMapper = new JsonMapperProvider().get(); -// Q = new PostgresQueueDAO(objectMapper, this.dataSource); -// E = new PostgresExecutionDAO(objectMapper, this.dataSource); -// } -// -// @After -// public void tearDown() throws Exception { -// resetAllData(dataSource); -// } -// -// public static final String QUEUE = "task_queue"; -// -// @Test -// public void testQueueDaoPerformance() throws InterruptedException { -// AtomicBoolean stop = new AtomicBoolean(false); -// Stopwatch start = Stopwatch.createStarted(); -// AtomicInteger poppedCoutner = new AtomicInteger(0); -// HashMultiset allPopped = HashMultiset.create(); -// -// // Consumers - workers -// for (int i = 0; i < WORKERS; i++) { -// threadPool.submit(() -> { -// while (!stop.get()) { -// List pop = Q.pollMessages(QUEUE, WORKER_BATCH, WORKER_BATCH_TIMEOUT); -// LOGGER.info("Popped {} messages", pop.size()); -// poppedCoutner.accumulateAndGet(pop.size(), Integer::sum); -// -// if (pop.size() == 0) { -// try { -// Thread.sleep(200); -// } catch (InterruptedException e) { -// throw new RuntimeException(e); -// } -// } else { -// LOGGER.info("Popped {}", -// pop.stream().map(Message::getId).collect(Collectors.toList())); -// } -// -// pop.forEach(popped -> { -// synchronized (allPopped) { -// allPopped.add(popped.getId()); -// } -// boolean exists = Q.containsMessage(QUEUE, popped.getId()); -// boolean ack = Q.ack(QUEUE, popped.getId()); -// -// if (ack && exists) { -// // OK -// } else { -// LOGGER.error("Exists & Ack did not succeed for msg: {}", popped); -// } -// }); -// } -// }); -// } -// -// 
// Producers -// List> producers = Lists.newArrayList(); -// for (int i = 0; i < PRODUCERS; i++) { -// Future producer = threadPool.submit(() -> { -// try { -// // N messages -// for (int j = 0; j < MSGS / PRODUCER_BATCH; j++) { -// List randomMessages = getRandomMessages(PRODUCER_BATCH); -// Q.push(QUEUE, randomMessages); -// LOGGER.info("Pushed {} messages", PRODUCER_BATCH); -// LOGGER.info("Pushed {}", -// randomMessages.stream().map(Message::getId).collect(Collectors.toList())); -// } -// LOGGER.info("Pushed ALL"); -// } catch (Exception e) { -// LOGGER.error("Something went wrong with producer", e); -// throw new RuntimeException(e); -// } -// }); -// -// producers.add(producer); -// } -// -// // Observers -// for (int i = 0; i < OBSERVERS; i++) { -// threadPool.submit(() -> { -// while (!stop.get()) { -// try { -// int size = Q.getSize(QUEUE); -// Q.queuesDetail(); -// LOGGER.info("Size {} messages", size); -// } catch (Exception e) { -// LOGGER.info("Queue size failed, nevermind"); -// } -// -// try { -// Thread.sleep(OBSERVER_DELAY); -// } catch (InterruptedException e) { -// throw new RuntimeException(e); -// } -// } -// }); -// } -// -// // Consumers - unack processor -// for (int i = 0; i < UNACK_RUNNERS; i++) { -// threadPool.submit(() -> { -// while (!stop.get()) { -// try { -// Q.processUnacks(QUEUE); -// } catch (Exception e) { -// LOGGER.info("Unack failed, nevermind", e); -// continue; -// } -// LOGGER.info("Unacked"); -// try { -// Thread.sleep(UNACK_DELAY); -// } catch (InterruptedException e) { -// throw new RuntimeException(e); -// } -// } -// }); -// } -// -// long elapsed; -// while (true) { -// try { -// Thread.sleep(COMPLETION_MONITOR_DELAY); -// } catch (InterruptedException e) { -// throw new RuntimeException(e); -// } -// -// int size = Q.getSize(QUEUE); -// LOGGER.info("MONITOR SIZE : {}", size); -// -// if (size == 0 && producers.stream().map(Future::isDone).reduce(true, (b1, b2) -> b1 && -// b2)) { -// elapsed = start.elapsed(TimeUnit.MILLISECONDS); -// stop.set(true); -// break; -// } -// } -// -// threadPool.awaitTermination(10, TimeUnit.SECONDS); -// threadPool.shutdown(); -// LOGGER.info("Finished in {} ms", elapsed); -// LOGGER.info("Throughput {} msgs/second", ((MSGS * PRODUCERS) / (elapsed * 1.0)) * 1000); -// LOGGER.info("Threads finished"); -// if (poppedCoutner.get() != MSGS * PRODUCERS) { -// synchronized (allPopped) { -// List duplicates = allPopped.entrySet().stream() -// .filter(stringEntry -> stringEntry.getCount() > 1) -// .map(stringEntry -> stringEntry.getElement() + ": " + stringEntry.getCount()) -// .collect(Collectors.toList()); -// -// LOGGER.error("Found duplicate pops: " + duplicates); -// } -// throw new RuntimeException("Popped " + poppedCoutner.get() + " != produced: " + MSGS * -// PRODUCERS); -// } -// } -// -// @Test -// public void testExecDaoPerformance() throws InterruptedException { -// AtomicBoolean stop = new AtomicBoolean(false); -// Stopwatch start = Stopwatch.createStarted(); -// BlockingDeque msgQueue = new LinkedBlockingDeque<>(1000); -// HashMultiset allPopped = HashMultiset.create(); -// -// // Consumers - workers -// for (int i = 0; i < WORKERS; i++) { -// threadPool.submit(() -> { -// while (!stop.get()) { -// List popped = new ArrayList<>(); -// while (true) { -// try { -// Task poll; -// poll = msgQueue.poll(10, TimeUnit.MILLISECONDS); -// -// if (poll == null) { -// // poll timed out -// continue; -// } -// synchronized (allPopped) { -// allPopped.add(poll.getTaskId()); -// } -// popped.add(poll); -// if 
(stop.get() || popped.size() == WORKER_BATCH) { -// break; -// } -// } catch (InterruptedException e) { -// throw new RuntimeException(e); -// } -// } -// -// LOGGER.info("Popped {} messages", popped.size()); -// LOGGER.info("Popped {}", -// popped.stream().map(Task::getTaskId).collect(Collectors.toList())); -// -// // Polling -// popped.stream() -// .peek(task -> { -// task.setWorkerId("someWorker"); -// task.setPollCount(task.getPollCount() + 1); -// task.setStartTime(System.currentTimeMillis()); -// }) -// .forEach(task -> { -// try { -// // should always be false -// boolean concurrentLimit = E.exceedsInProgressLimit(task); -// task.setStartTime(System.currentTimeMillis()); -// E.updateTask(task); -// LOGGER.info("Polled {}", task.getTaskId()); -// } catch (Exception e) { -// LOGGER.error("Something went wrong with worker during poll", e); -// throw new RuntimeException(e); -// } -// }); -// -// popped.forEach(task -> { -// try { -// -// String wfId = task.getWorkflowInstanceId(); -// Workflow workflow = E.getWorkflow(wfId, true); -// E.getTask(task.getTaskId()); -// -// task.setStatus(Task.Status.COMPLETED); -// task.setWorkerId("someWorker"); -// task.setOutputData(Collections.singletonMap("a", "b")); -// E.updateTask(task); -// E.updateWorkflow(workflow); -// LOGGER.info("Updated {}", task.getTaskId()); -// } catch (Exception e) { -// LOGGER.error("Something went wrong with worker during update", e); -// throw new RuntimeException(e); -// } -// }); -// -// } -// }); -// } -// -// Multiset pushedTasks = HashMultiset.create(); -// -// // Producers -// List> producers = Lists.newArrayList(); -// for (int i = 0; i < PRODUCERS; i++) { -// Future producer = threadPool.submit(() -> { -// // N messages -// for (int j = 0; j < MSGS / PRODUCER_BATCH; j++) { -// List randomTasks = getRandomTasks(PRODUCER_BATCH); -// -// Workflow wf = getWorkflow(randomTasks); -// E.createWorkflow(wf); -// -// E.createTasks(randomTasks); -// randomTasks.forEach(t -> { -// try { -// boolean offer = false; -// while (!offer) { -// offer = msgQueue.offer(t, 10, TimeUnit.MILLISECONDS); -// } -// } catch (InterruptedException e) { -// throw new RuntimeException(e); -// } -// }); -// LOGGER.info("Pushed {} messages", PRODUCER_BATCH); -// List collect = -// randomTasks.stream().map(Task::getTaskId).collect(Collectors.toList()); -// synchronized (pushedTasks) { -// pushedTasks.addAll(collect); -// } -// LOGGER.info("Pushed {}", collect); -// } -// LOGGER.info("Pushed ALL"); -// }); -// -// producers.add(producer); -// } -// -// // Observers -// for (int i = 0; i < OBSERVERS; i++) { -// threadPool.submit(() -> { -// while (!stop.get()) { -// try { -// List size = E.getPendingTasksForTaskType("taskType"); -// LOGGER.info("Size {} messages", size.size()); -// LOGGER.info("Size q {} messages", msgQueue.size()); -// synchronized (allPopped) { -// LOGGER.info("All pp {} messages", allPopped.size()); -// } -// LOGGER.info("Workflows by correlation id size: {}", -// E.getWorkflowsByCorrelationId("abcd", "1", true).size()); -// LOGGER.info("Workflows by correlation id size: {}", -// E.getWorkflowsByCorrelationId("abcd", "2", true).size()); -// LOGGER.info("Workflows running ids: {}", E.getRunningWorkflowIds("abcd", -// 1)); -// LOGGER.info("Workflows pending count: {}", -// E.getPendingWorkflowCount("abcd")); -// } catch (Exception e) { -// LOGGER.warn("Observer failed ", e); -// } -// try { -// Thread.sleep(OBSERVER_DELAY); -// } catch (InterruptedException e) { -// throw new RuntimeException(e); -// } -// } -// }); -// } -// 
-// long elapsed; -// while (true) { -// try { -// Thread.sleep(COMPLETION_MONITOR_DELAY); -// } catch (InterruptedException e) { -// throw new RuntimeException(e); -// } -// -// int size; -// try { -// size = E.getPendingTasksForTaskType("taskType").size(); -// } catch (Exception e) { -// LOGGER.warn("Monitor failed", e); -// continue; -// } -// LOGGER.info("MONITOR SIZE : {}", size); -// -// if (size == 0 && producers.stream().map(Future::isDone).reduce(true, (b1, b2) -> b1 && -// b2)) { -// elapsed = start.elapsed(TimeUnit.MILLISECONDS); -// stop.set(true); -// break; -// } -// } -// -// threadPool.awaitTermination(10, TimeUnit.SECONDS); -// threadPool.shutdown(); -// LOGGER.info("Finished in {} ms", elapsed); -// LOGGER.info("Throughput {} msgs/second", ((MSGS * PRODUCERS) / (elapsed * 1.0)) * 1000); -// LOGGER.info("Threads finished"); -// -// List duplicates = pushedTasks.entrySet().stream() -// .filter(stringEntry -> stringEntry.getCount() > 1) -// .map(stringEntry -> stringEntry.getElement() + ": " + stringEntry.getCount()) -// .collect(Collectors.toList()); -// -// LOGGER.error("Found duplicate pushes: " + duplicates); -// } -// -// private Workflow getWorkflow(List randomTasks) { -// Workflow wf = new Workflow(); -// wf.setWorkflowId(randomTasks.get(0).getWorkflowInstanceId()); -// wf.setCorrelationId(wf.getWorkflowId()); -// wf.setTasks(randomTasks); -// WorkflowDef workflowDefinition = new WorkflowDef(); -// workflowDefinition.setName("abcd"); -// wf.setWorkflowDefinition(workflowDefinition); -// wf.setStartTime(System.currentTimeMillis()); -// return wf; -// } -// -// private List getRandomTasks(int i) { -// String timestamp = Long.toString(System.nanoTime()); -// return IntStream.range(0, i).mapToObj(j -> { -// String id = Thread.currentThread().getId() + "_" + timestamp + "_" + j; -// Task task = new Task(); -// task.setTaskId(id); -// task.setCorrelationId(Integer.toString(j)); -// task.setTaskType("taskType"); -// task.setReferenceTaskName("refName" + j); -// task.setWorkflowType("task_wf"); -// task.setWorkflowInstanceId(Thread.currentThread().getId() + "_" + timestamp); -// return task; -// }).collect(Collectors.toList()); -// } -// -// private List getRandomMessages(int i) { -// String timestamp = Long.toString(System.nanoTime()); -// return IntStream.range(0, i).mapToObj(j -> { -// String id = Thread.currentThread().getId() + "_" + timestamp + "_" + j; -// return new Message(id, "{ \"a\": \"b\", \"timestamp\": \" " + timestamp + " \"}", -// "receipt"); -// }).collect(Collectors.toList()); -// } -// -// private void flywayMigrate(DataSource dataSource) { -// FluentConfiguration flywayConfiguration = Flyway.configure() -// .table(configuration.getFlywayTable()) -// .locations(Paths.get("db","migration_postgres").toString()) -// .dataSource(dataSource) -// .placeholderReplacement(false); -// -// Flyway flyway = flywayConfiguration.load(); -// try { -// flyway.migrate(); -// } catch (FlywayException e) { -// if (e.getMessage().contains("non-empty")) { -// return; -// } -// throw e; -// } -// } -// -// public void resetAllData(DataSource dataSource) { -// // TODO -// } -// } diff --git a/postgres-persistence/src/test/resources/application.properties b/postgres-persistence/src/test/resources/application.properties deleted file mode 100644 index c7a5732047..0000000000 --- a/postgres-persistence/src/test/resources/application.properties +++ /dev/null @@ -1,7 +0,0 @@ -conductor.db.type=postgres -spring.datasource.url=jdbc:tc:postgresql:///conductor 
-spring.datasource.username=postgres
-spring.datasource.password=postgres
-spring.datasource.hikari.maximum-pool-size=8
-spring.datasource.hikari.auto-commit=false
-spring.flyway.locations=classpath:db/migration_postgres
diff --git a/redis-concurrency-limit/dependencies.lock b/redis-concurrency-limit/dependencies.lock
index e5a8166299..8ad8d878ed 100644
--- a/redis-concurrency-limit/dependencies.lock
+++ b/redis-concurrency-limit/dependencies.lock
@@ -209,6 +209,18 @@
                 "com.netflix.conductor:conductor-core"
             ]
         },
+        "com.github.ben-manes.caffeine:caffeine": {
+            "locked": "2.8.8",
+            "transitive": [
+                "com.netflix.conductor:conductor-core"
+            ]
+        },
+        "com.google.errorprone:error_prone_annotations": {
+            "locked": "2.4.0",
+            "transitive": [
+                "com.github.ben-manes.caffeine:caffeine"
+            ]
+        },
         "com.google.protobuf:protobuf-java": {
             "locked": "3.13.0",
             "transitive": [
@@ -352,6 +364,12 @@
                 "com.netflix.conductor:conductor-core"
             ]
         },
+        "org.checkerframework:checker-qual": {
+            "locked": "3.8.0",
+            "transitive": [
+                "com.github.ben-manes.caffeine:caffeine"
+            ]
+        },
         "org.ow2.asm:asm": {
             "locked": "5.0.4",
             "transitive": [
@@ -1108,6 +1126,12 @@
                 "com.netflix.conductor:conductor-core"
             ]
         },
+        "com.github.ben-manes.caffeine:caffeine": {
+            "locked": "2.8.8",
+            "transitive": [
+                "com.netflix.conductor:conductor-core"
+            ]
+        },
         "com.github.docker-java:docker-java-api": {
             "locked": "3.2.8",
             "transitive": [
@@ -1126,6 +1150,12 @@
                 "org.testcontainers:testcontainers"
             ]
         },
+        "com.google.errorprone:error_prone_annotations": {
+            "locked": "2.4.0",
+            "transitive": [
+                "com.github.ben-manes.caffeine:caffeine"
+            ]
+        },
         "com.google.protobuf:protobuf-java": {
             "locked": "3.13.0",
             "transitive": [
@@ -1387,6 +1417,12 @@
                 "org.springframework.boot:spring-boot-starter-test"
             ]
         },
+        "org.checkerframework:checker-qual": {
+            "locked": "3.8.0",
+            "transitive": [
+                "com.github.ben-manes.caffeine:caffeine"
+            ]
+        },
         "org.codehaus.groovy:groovy": {
             "locked": "2.5.14",
             "transitive": [
diff --git a/redis-lock/dependencies.lock b/redis-lock/dependencies.lock
index e22ef46e5e..1c831ee951 100644
--- a/redis-lock/dependencies.lock
+++ b/redis-lock/dependencies.lock
@@ -316,6 +316,18 @@
                 "org.redisson:redisson"
             ]
         },
+        "com.github.ben-manes.caffeine:caffeine": {
+            "locked": "2.8.8",
+            "transitive": [
+                "com.netflix.conductor:conductor-core"
+            ]
+        },
+        "com.google.errorprone:error_prone_annotations": {
+            "locked": "2.4.0",
+            "transitive": [
+                "com.github.ben-manes.caffeine:caffeine"
+            ]
+        },
         "com.google.protobuf:protobuf-java": {
             "locked": "3.13.0",
             "transitive": [
@@ -547,6 +559,12 @@
                 "com.netflix.conductor:conductor-core"
             ]
         },
+        "org.checkerframework:checker-qual": {
+            "locked": "3.8.0",
+            "transitive": [
+                "com.github.ben-manes.caffeine:caffeine"
+            ]
+        },
         "org.jboss.marshalling:jboss-marshalling": {
             "locked": "2.0.9.Final",
             "transitive": [
@@ -1137,9 +1155,21 @@
                 "org.redisson:redisson"
             ]
         },
+        "com.github.ben-manes.caffeine:caffeine": {
+            "locked": "2.8.8",
+            "transitive": [
+                "com.netflix.conductor:conductor-core"
+            ]
+        },
         "com.github.kstyrc:embedded-redis": {
             "locked": "0.6"
         },
+        "com.google.errorprone:error_prone_annotations": {
+            "locked": "2.4.0",
+            "transitive": [
+                "com.github.ben-manes.caffeine:caffeine"
+            ]
+        },
         "com.google.guava:guava": {
             "locked": "18.0",
             "transitive": [
@@ -1425,6 +1455,12 @@
                 "org.springframework.boot:spring-boot-starter-test"
             ]
         },
+        "org.checkerframework:checker-qual": {
+            "locked": "3.8.0",
+            "transitive": [
+                "com.github.ben-manes.caffeine:caffeine"
+            ]
+        },
         "org.hamcrest:hamcrest": {
             "locked": "2.2",
             "transitive":
[ diff --git a/redis-lock/src/main/resources/META-INF/additional-spring-configuration-metadata.json b/redis-lock/src/main/resources/META-INF/additional-spring-configuration-metadata.json index 3edd5aeeba..fe41f5bea9 100644 --- a/redis-lock/src/main/resources/META-INF/additional-spring-configuration-metadata.json +++ b/redis-lock/src/main/resources/META-INF/additional-spring-configuration-metadata.json @@ -6,6 +6,15 @@ } ], "hints": [ + { + "name": "conductor.workflow-execution-lock.type", + "values": [ + { + "value": "redis", + "description": "Use the redis-lock implementation as the lock provider." + } + ] + }, { "name": "conductor.redis-lock.server-type", "providers": [ diff --git a/redis-persistence/dependencies.lock b/redis-persistence/dependencies.lock index 6ea21f96f1..f828edefc1 100644 --- a/redis-persistence/dependencies.lock +++ b/redis-persistence/dependencies.lock @@ -455,6 +455,12 @@ "com.github.vlsi.compactmap:compactmap" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, "com.github.vlsi.compactmap:compactmap": { "locked": "1.2.1", "transitive": [ @@ -476,6 +482,12 @@ "com.netflix.netflix-commons:netflix-infix" ] }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "com.google.guava:guava": { "locked": "19.0", "transitive": [ @@ -886,6 +898,12 @@ "com.netflix.conductor:conductor-core" ] }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "org.codehaus.jettison:jettison": { "locked": "1.3.7", "transitive": [ @@ -1614,6 +1632,12 @@ "com.github.vlsi.compactmap:compactmap" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, "com.github.vlsi.compactmap:compactmap": { "locked": "1.2.1", "transitive": [ @@ -1635,6 +1659,12 @@ "com.netflix.netflix-commons:netflix-infix" ] }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "com.google.guava:guava": { "locked": "19.0", "transitive": [ @@ -2097,6 +2127,12 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "org.codehaus.jettison:jettison": { "locked": "1.3.7", "transitive": [ diff --git a/rest/dependencies.lock b/rest/dependencies.lock index c573511cc5..dc888acedf 100644 --- a/rest/dependencies.lock +++ b/rest/dependencies.lock @@ -387,6 +387,18 @@ "org.springframework.boot:spring-boot-starter-json" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "com.google.protobuf:protobuf-java": { "locked": "3.13.0", "transitive": [ @@ -579,6 +591,12 @@ "org.springframework.boot:spring-boot-starter-tomcat" ] }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "org.glassfish:jakarta.el": { "locked": "3.0.3", "transitive": [ @@ -1331,6 +1349,18 @@ "org.springframework.boot:spring-boot-starter-json" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": 
"2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core" + ] + }, + "com.google.errorprone:error_prone_annotations": { + "locked": "2.4.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "com.google.protobuf:protobuf-java": { "locked": "3.13.0", "transitive": [ @@ -1569,6 +1599,12 @@ "org.springframework.boot:spring-boot-starter-test" ] }, + "org.checkerframework:checker-qual": { + "locked": "3.8.0", + "transitive": [ + "com.github.ben-manes.caffeine:caffeine" + ] + }, "org.glassfish:jakarta.el": { "locked": "3.0.3", "transitive": [ diff --git a/server/build.gradle b/server/build.gradle index e0010d93bb..6a67c556b7 100644 --- a/server/build.gradle +++ b/server/build.gradle @@ -19,16 +19,16 @@ dependencies { implementation project(':conductor-rest') implementation project(':conductor-core') - implementation project(':conductor-contribs') implementation project(':conductor-redis-persistence') implementation project(':conductor-cassandra-persistence') - implementation project(':conductor-postgres-persistence') - implementation project(':conductor-mysql-persistence') implementation project(':conductor-es6-persistence') - implementation(project(path: ':conductor-es7-persistence', configuration: 'shadow')) implementation project(':conductor-grpc-server') implementation project(':conductor-redis-lock') - implementation project(':conductor-postgres-external-storage') + implementation project(':conductor-redis-concurrency-limit') + implementation project(':conductor-http-task') + implementation project(':conductor-json-jq-task') + implementation project(':conductor-awss3-storage') + implementation project(':conductor-awssqs-event-queue') implementation 'org.springframework.boot:spring-boot-starter' implementation 'org.springframework.boot:spring-boot-starter-validation' @@ -42,11 +42,6 @@ dependencies { implementation "org.springdoc:springdoc-openapi-ui:${revOpenapi}" - implementation(group: 'com.rabbitmq', name: 'amqp-client'){ version{require "${revAmqpClient}"}} - runtimeOnly 'io.micrometer:micrometer-registry-datadog' - - runtimeOnly 'com.netflix.spectator:spectator-reg-micrometer' - runtimeOnly "org.glassfish.jaxb:jaxb-runtime:${revJAXB}" testImplementation project(':conductor-rest') diff --git a/server/dependencies.lock b/server/dependencies.lock index 253276a080..5d69572785 100644 --- a/server/dependencies.lock +++ b/server/dependencies.lock @@ -67,31 +67,31 @@ "org.hibernate.validator:hibernate-validator" ] }, - "com.netflix.conductor:conductor-cassandra-persistence": { + "com.netflix.conductor:conductor-awss3-storage": { "project": true }, - "com.netflix.conductor:conductor-contribs": { + "com.netflix.conductor:conductor-awssqs-event-queue": { "project": true }, - "com.netflix.conductor:conductor-core": { + "com.netflix.conductor:conductor-cassandra-persistence": { "project": true }, - "com.netflix.conductor:conductor-es6-persistence": { + "com.netflix.conductor:conductor-core": { "project": true }, - "com.netflix.conductor:conductor-es7-persistence": { + "com.netflix.conductor:conductor-es6-persistence": { "project": true }, "com.netflix.conductor:conductor-grpc-server": { "project": true }, - "com.netflix.conductor:conductor-mysql-persistence": { + "com.netflix.conductor:conductor-http-task": { "project": true }, - "com.netflix.conductor:conductor-postgres-external-storage": { + "com.netflix.conductor:conductor-json-jq-task": { "project": true }, - "com.netflix.conductor:conductor-postgres-persistence": { + 
"com.netflix.conductor:conductor-redis-concurrency-limit": { "project": true }, "com.netflix.conductor:conductor-redis-lock": { @@ -103,9 +103,6 @@ "com.netflix.conductor:conductor-rest": { "project": true }, - "com.rabbitmq:amqp-client": { - "locked": "5.14.0" - }, "io.github.classgraph:classgraph": { "locked": "4.8.143", "transitive": [ @@ -247,7 +244,6 @@ "org.slf4j:slf4j-api": { "locked": "1.7.30", "transitive": [ - "com.rabbitmq:amqp-client", "io.swagger.core.v3:swagger-core", "org.apache.logging.log4j:log4j-slf4j-impl", "org.slf4j:jul-to-slf4j", @@ -301,7 +297,6 @@ "org.springframework.boot:spring-boot-starter": { "locked": "2.3.12.RELEASE", "transitive": [ - "com.netflix.conductor:conductor-es7-persistence", "org.springframework.boot:spring-boot-starter-actuator", "org.springframework.boot:spring-boot-starter-json", "org.springframework.boot:spring-boot-starter-validation", @@ -339,10 +334,7 @@ "locked": "2.3.12.RELEASE" }, "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE", - "transitive": [ - "com.netflix.conductor:conductor-es7-persistence" - ] + "locked": "1.2.5.RELEASE" }, "org.springframework:spring-aop": { "locked": "5.2.15.RELEASE", @@ -461,13 +453,13 @@ "com.amazonaws:aws-java-sdk-s3": { "locked": "1.11.86", "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-awss3-storage" ] }, "com.amazonaws:aws-java-sdk-sqs": { "locked": "1.11.86", "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-awssqs-event-queue" ] }, "com.amazonaws:jmespath-java": { @@ -520,8 +512,6 @@ "com.netflix.archaius:archaius-core", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", "com.netflix.eureka:eureka-client", "org.elasticsearch:elasticsearch-x-content", "org.redisson:redisson", @@ -543,8 +533,6 @@ "com.netflix.archaius:archaius-core", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", "com.netflix.dyno-queues:dyno-queues-redis", "com.netflix.eureka:eureka-client", "io.swagger.core.v3:swagger-core", @@ -607,6 +595,13 @@ "com.github.vlsi.compactmap:compactmap" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core", + "com.netflix.conductor:conductor-json-jq-task" + ] + }, "com.github.jnr:jffi": { "locked": "1.2.16", "transitive": [ @@ -638,12 +633,6 @@ "com.github.jnr:jnr-ffi" ] }, - "com.github.luben:zstd-jni": { - "locked": "1.4.4-7", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "com.github.spullara.mustache.java:compiler": { "locked": "0.9.3", "transitive": [ @@ -693,6 +682,7 @@ "com.google.errorprone:error_prone_annotations": { "locked": "2.10.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava", "com.google.protobuf:protobuf-java-util", "io.grpc:grpc-api", @@ -715,10 +705,8 @@ "com.google.inject:guice", "com.google.protobuf:protobuf-java-util", "com.netflix.archaius:archaius-core", - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-es6-persistence", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", 
"com.netflix.netflix-commons:netflix-infix", "com.netflix.servo:servo-core", "io.grpc:grpc-api", @@ -759,8 +747,7 @@ "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-grpc", - "io.grpc:grpc-protobuf", - "mysql:mysql-connector-java" + "io.grpc:grpc-protobuf" ] }, "com.google.protobuf:protobuf-java-util": { @@ -801,38 +788,43 @@ "com.netflix.conductor:conductor-common" ] }, + "com.netflix.conductor:conductor-awss3-storage": { + "project": true + }, + "com.netflix.conductor:conductor-awssqs-event-queue": { + "project": true + }, "com.netflix.conductor:conductor-cassandra-persistence": { "project": true }, "com.netflix.conductor:conductor-common": { "project": true, "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest" ] }, - "com.netflix.conductor:conductor-contribs": { - "project": true - }, "com.netflix.conductor:conductor-core": { "project": true, "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest" @@ -841,9 +833,6 @@ "com.netflix.conductor:conductor-es6-persistence": { "project": true }, - "com.netflix.conductor:conductor-es7-persistence": { - "project": true - }, "com.netflix.conductor:conductor-grpc": { "project": true, "transitive": [ @@ -853,13 +842,13 @@ "com.netflix.conductor:conductor-grpc-server": { "project": true }, - "com.netflix.conductor:conductor-mysql-persistence": { + "com.netflix.conductor:conductor-http-task": { "project": true }, - "com.netflix.conductor:conductor-postgres-external-storage": { + "com.netflix.conductor:conductor-json-jq-task": { "project": true }, - "com.netflix.conductor:conductor-postgres-persistence": { + "com.netflix.conductor:conductor-redis-concurrency-limit": { "project": true }, "com.netflix.conductor:conductor-redis-lock": { @@ -966,27 +955,7 @@ "com.netflix.spectator:spectator-api": { "locked": "0.122.0", "transitive": [ - "com.netflix.conductor:conductor-core", - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer" - ] - }, - 
"com.netflix.spectator:spectator-reg-metrics3": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, - "com.netflix.spectator:spectator-reg-micrometer": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, - "com.rabbitmq:amqp-client": { - "locked": "5.14.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-core" ] }, "com.spotify:completable-futures": { @@ -1046,12 +1015,6 @@ "com.netflix.eureka:eureka-client" ] }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, "commons-cli:commons-cli": { "locked": "1.4", "transitive": [ @@ -1102,8 +1065,7 @@ "io.dropwizard.metrics:metrics-core": { "locked": "4.1.22", "transitive": [ - "com.datastax.cassandra:cassandra-driver-core", - "com.netflix.spectator:spectator-reg-metrics3" + "com.datastax.cassandra:cassandra-driver-core" ] }, "io.github.classgraph:classgraph": { @@ -1175,27 +1137,9 @@ "io.micrometer:micrometer-core": { "locked": "1.5.14", "transitive": [ - "com.netflix.spectator:spectator-reg-micrometer", - "io.micrometer:micrometer-registry-datadog", - "io.micrometer:micrometer-registry-prometheus", "org.springframework.boot:spring-boot-starter-actuator" ] }, - "io.micrometer:micrometer-registry-datadog": { - "locked": "1.5.14" - }, - "io.micrometer:micrometer-registry-prometheus": { - "locked": "1.5.14", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, - "io.nats:java-nats-streaming": { - "locked": "0.5.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, "io.netty:netty-buffer": { "locked": "4.1.65.Final", "transitive": [ @@ -1330,19 +1274,6 @@ "org.redisson:redisson" ] }, - "io.prometheus:simpleclient": { - "locked": "0.9.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs", - "io.prometheus:simpleclient_common" - ] - }, - "io.prometheus:simpleclient_common": { - "locked": "0.8.1", - "transitive": [ - "io.micrometer:micrometer-registry-prometheus" - ] - }, "io.reactivex.rxjava2:rxjava": { "locked": "2.2.21", "transitive": [ @@ -1352,7 +1283,7 @@ "io.reactivex:rxjava": { "locked": "1.3.8", "transitive": [ - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-core" ] }, @@ -1430,7 +1361,7 @@ "javax.ws.rs:jsr311-api": { "locked": "1.1.1", "transitive": [ - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-http-task", "com.netflix.eureka:eureka-client", "com.sun.jersey:jersey-core" ] @@ -1449,12 +1380,6 @@ "org.elasticsearch:elasticsearch" ] }, - "mysql:mysql-connector-java": { - "locked": "8.0.25", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence" - ] - }, "net.bytebuddy:byte-buddy": { "locked": "1.10.22", "transitive": [ @@ -1482,7 +1407,7 @@ "net.thisptr:jackson-jq": { "locked": "0.0.13", "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-json-jq-task" ] }, "org.antlr:antlr-runtime": { @@ -1507,14 +1432,14 @@ "org.apache.commons:commons-lang3": { "locked": "3.10", "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", 
"com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.dyno:dyno-contrib", "com.netflix.dyno:dyno-core", @@ -1568,26 +1493,21 @@ "org.elasticsearch.client:elasticsearch-rest-client" ] }, - "org.apache.kafka:kafka-clients": { - "locked": "2.5.1", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -1602,16 +1522,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -1624,16 +1545,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", 
"com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -1644,16 +1566,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -1664,16 +1587,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest" @@ -1791,8 +1715,8 @@ "org.checkerframework:checker-qual": { "locked": "3.12.0", "transitive": [ - "com.google.guava:guava", - "org.postgresql:postgresql" + "com.github.ben-manes.caffeine:caffeine", + "com.google.guava:guava" ] }, "org.codehaus.jettison:jettison": { @@ -1918,14 +1842,6 @@ "org.elasticsearch:elasticsearch" ] }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.glassfish.jaxb:jaxb-runtime": { "locked": "2.3.3" }, @@ -2009,12 +1925,6 @@ "org.rarefiedredis.redis:redis-java" ] }, - "org.lz4:lz4-java": { - "locked": "1.7.1", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.ow2.asm:asm": { "locked": "5.0.4", "transitive": [ @@ -2050,13 +1960,6 @@ "com.github.jnr:jnr-ffi" ] }, - "org.postgresql:postgresql": { - "locked": "42.2.20", - "transitive": [ - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.projectlombok:lombok": { "locked": "1.18.20", "transitive": [ @@ -2105,14 +2008,8 @@ "com.netflix.netflix-commons:netflix-infix", "com.netflix.servo:servo-core", "com.netflix.spectator:spectator-api", - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer", - 
"com.rabbitmq:amqp-client", - "com.zaxxer:HikariCP", "io.dropwizard.metrics:metrics-core", - "io.micrometer:micrometer-registry-datadog", "io.swagger.core.v3:swagger-core", - "org.apache.kafka:kafka-clients", "org.apache.logging.log4j:log4j-slf4j-impl", "org.redisson:redisson", "org.slf4j:jul-to-slf4j", @@ -2129,7 +2026,6 @@ "org.springdoc:springdoc-openapi-ui": { "locked": "1.6.8", "transitive": [ - "com.netflix.conductor:conductor-postgres-external-storage", "com.netflix.conductor:conductor-rest" ] }, @@ -2171,9 +2067,7 @@ "org.springframework.boot:spring-boot-starter": { "locked": "2.3.12.RELEASE", "transitive": [ - "com.netflix.conductor:conductor-es7-persistence", "org.springframework.boot:spring-boot-starter-actuator", - "org.springframework.boot:spring-boot-starter-jdbc", "org.springframework.boot:spring-boot-starter-json", "org.springframework.boot:spring-boot-starter-validation", "org.springframework.boot:spring-boot-starter-web" @@ -2182,14 +2076,6 @@ "org.springframework.boot:spring-boot-starter-actuator": { "locked": "2.3.12.RELEASE" }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.springframework.boot:spring-boot-starter-json": { "locked": "2.3.12.RELEASE", "transitive": [ @@ -2221,10 +2107,7 @@ ] }, "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE", - "transitive": [ - "com.netflix.conductor:conductor-es7-persistence" - ] + "locked": "1.2.5.RELEASE" }, "org.springframework:spring-aop": { "locked": "5.2.15.RELEASE", @@ -2238,8 +2121,6 @@ "transitive": [ "org.springframework:spring-aop", "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", "org.springframework:spring-web", "org.springframework:spring-webmvc" ] @@ -2261,8 +2142,6 @@ "org.springframework:spring-beans", "org.springframework:spring-context", "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", "org.springframework:spring-web", "org.springframework:spring-webmvc" ] @@ -2280,18 +2159,6 @@ "org.springframework:spring-core" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, "org.springframework:spring-web": { "locked": "5.2.15.RELEASE", "transitive": [ @@ -2320,12 +2187,6 @@ "org.springdoc:springdoc-openapi-ui" ] }, - "org.xerial.snappy:snappy-java": { - "locked": "1.1.7.3", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.yaml:snakeyaml": { "locked": "1.26", "transitive": [ @@ -2338,6 +2199,7 @@ "redis.clients:jedis": { "locked": "3.3.0", "transitive": [ + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.dyno:dyno-jedis", "org.rarefiedredis.redis:redis-java" @@ -2393,13 +2255,13 @@ "com.amazonaws:aws-java-sdk-s3": { "locked": "1.11.86", "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-awss3-storage" ] }, "com.amazonaws:aws-java-sdk-sqs": { "locked": "1.11.86", "transitive": [ - "com.netflix.conductor:conductor-contribs" + 
"com.netflix.conductor:conductor-awssqs-event-queue" ] }, "com.amazonaws:jmespath-java": { @@ -2452,8 +2314,6 @@ "com.netflix.archaius:archaius-core", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", "com.netflix.eureka:eureka-client", "org.elasticsearch:elasticsearch-x-content", "org.redisson:redisson", @@ -2475,8 +2335,6 @@ "com.netflix.archaius:archaius-core", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", "com.netflix.dyno-queues:dyno-queues-redis", "com.netflix.eureka:eureka-client", "io.swagger.core.v3:swagger-core", @@ -2539,6 +2397,13 @@ "com.github.vlsi.compactmap:compactmap" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core", + "com.netflix.conductor:conductor-json-jq-task" + ] + }, "com.github.jnr:jffi": { "locked": "1.2.16", "transitive": [ @@ -2570,12 +2435,6 @@ "com.github.jnr:jnr-ffi" ] }, - "com.github.luben:zstd-jni": { - "locked": "1.4.4-7", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "com.github.spullara.mustache.java:compiler": { "locked": "0.9.3", "transitive": [ @@ -2625,6 +2484,7 @@ "com.google.errorprone:error_prone_annotations": { "locked": "2.10.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava", "com.google.protobuf:protobuf-java-util", "io.grpc:grpc-api", @@ -2647,10 +2507,8 @@ "com.google.inject:guice", "com.google.protobuf:protobuf-java-util", "com.netflix.archaius:archaius-core", - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-es6-persistence", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", "com.netflix.netflix-commons:netflix-infix", "com.netflix.servo:servo-core", "io.grpc:grpc-api", @@ -2691,8 +2549,7 @@ "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-grpc", - "io.grpc:grpc-protobuf", - "mysql:mysql-connector-java" + "io.grpc:grpc-protobuf" ] }, "com.google.protobuf:protobuf-java-util": { @@ -2733,38 +2590,43 @@ "com.netflix.conductor:conductor-common" ] }, + "com.netflix.conductor:conductor-awss3-storage": { + "project": true + }, + "com.netflix.conductor:conductor-awssqs-event-queue": { + "project": true + }, "com.netflix.conductor:conductor-cassandra-persistence": { "project": true }, "com.netflix.conductor:conductor-common": { "project": true, "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", 
"com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest" ] }, - "com.netflix.conductor:conductor-contribs": { - "project": true - }, "com.netflix.conductor:conductor-core": { "project": true, "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest" @@ -2773,9 +2635,6 @@ "com.netflix.conductor:conductor-es6-persistence": { "project": true }, - "com.netflix.conductor:conductor-es7-persistence": { - "project": true - }, "com.netflix.conductor:conductor-grpc": { "project": true, "transitive": [ @@ -2785,13 +2644,13 @@ "com.netflix.conductor:conductor-grpc-server": { "project": true }, - "com.netflix.conductor:conductor-mysql-persistence": { + "com.netflix.conductor:conductor-http-task": { "project": true }, - "com.netflix.conductor:conductor-postgres-external-storage": { + "com.netflix.conductor:conductor-json-jq-task": { "project": true }, - "com.netflix.conductor:conductor-postgres-persistence": { + "com.netflix.conductor:conductor-redis-concurrency-limit": { "project": true }, "com.netflix.conductor:conductor-redis-lock": { @@ -2898,27 +2757,7 @@ "com.netflix.spectator:spectator-api": { "locked": "0.122.0", "transitive": [ - "com.netflix.conductor:conductor-core", - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer" - ] - }, - "com.netflix.spectator:spectator-reg-metrics3": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, - "com.netflix.spectator:spectator-reg-micrometer": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, - "com.rabbitmq:amqp-client": { - "locked": "5.14.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-core" ] }, "com.spotify:completable-futures": { @@ -2978,12 +2817,6 @@ "com.netflix.eureka:eureka-client" ] }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, "commons-cli:commons-cli": { "locked": "1.4", "transitive": [ @@ -3034,8 +2867,7 @@ "io.dropwizard.metrics:metrics-core": { "locked": "4.1.22", "transitive": [ - "com.datastax.cassandra:cassandra-driver-core", - "com.netflix.spectator:spectator-reg-metrics3" + "com.datastax.cassandra:cassandra-driver-core" ] }, "io.github.classgraph:classgraph": { @@ -3107,27 +2939,9 @@ "io.micrometer:micrometer-core": { "locked": "1.5.14", "transitive": [ - "com.netflix.spectator:spectator-reg-micrometer", - "io.micrometer:micrometer-registry-datadog", - "io.micrometer:micrometer-registry-prometheus", "org.springframework.boot:spring-boot-starter-actuator" ] }, - "io.micrometer:micrometer-registry-datadog": { - "locked": "1.5.14" - }, - "io.micrometer:micrometer-registry-prometheus": { - "locked": 
"1.5.14", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, - "io.nats:java-nats-streaming": { - "locked": "0.5.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, "io.netty:netty-buffer": { "locked": "4.1.65.Final", "transitive": [ @@ -3262,19 +3076,6 @@ "org.redisson:redisson" ] }, - "io.prometheus:simpleclient": { - "locked": "0.9.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs", - "io.prometheus:simpleclient_common" - ] - }, - "io.prometheus:simpleclient_common": { - "locked": "0.8.1", - "transitive": [ - "io.micrometer:micrometer-registry-prometheus" - ] - }, "io.reactivex.rxjava2:rxjava": { "locked": "2.2.21", "transitive": [ @@ -3284,7 +3085,7 @@ "io.reactivex:rxjava": { "locked": "1.3.8", "transitive": [ - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-core" ] }, @@ -3362,7 +3163,7 @@ "javax.ws.rs:jsr311-api": { "locked": "1.1.1", "transitive": [ - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-http-task", "com.netflix.eureka:eureka-client", "com.sun.jersey:jersey-core" ] @@ -3381,12 +3182,6 @@ "org.elasticsearch:elasticsearch" ] }, - "mysql:mysql-connector-java": { - "locked": "8.0.25", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence" - ] - }, "net.bytebuddy:byte-buddy": { "locked": "1.10.22", "transitive": [ @@ -3414,7 +3209,7 @@ "net.thisptr:jackson-jq": { "locked": "0.0.13", "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-json-jq-task" ] }, "org.antlr:antlr-runtime": { @@ -3439,14 +3234,14 @@ "org.apache.commons:commons-lang3": { "locked": "3.10", "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.dyno:dyno-contrib", "com.netflix.dyno:dyno-core", @@ -3500,26 +3295,21 @@ "org.elasticsearch.client:elasticsearch-rest-client" ] }, - "org.apache.kafka:kafka-clients": { - "locked": "2.5.1", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", 
"com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -3534,16 +3324,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -3556,16 +3347,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -3576,16 +3368,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -3596,16 +3389,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", 
"com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest" @@ -3723,8 +3517,8 @@ "org.checkerframework:checker-qual": { "locked": "3.12.0", "transitive": [ - "com.google.guava:guava", - "org.postgresql:postgresql" + "com.github.ben-manes.caffeine:caffeine", + "com.google.guava:guava" ] }, "org.codehaus.jettison:jettison": { @@ -3850,14 +3644,6 @@ "org.elasticsearch:elasticsearch" ] }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.glassfish.jaxb:jaxb-runtime": { "locked": "2.3.3" }, @@ -3941,12 +3727,6 @@ "org.rarefiedredis.redis:redis-java" ] }, - "org.lz4:lz4-java": { - "locked": "1.7.1", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.ow2.asm:asm": { "locked": "5.0.4", "transitive": [ @@ -3982,13 +3762,6 @@ "com.github.jnr:jnr-ffi" ] }, - "org.postgresql:postgresql": { - "locked": "42.2.20", - "transitive": [ - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.projectlombok:lombok": { "locked": "1.18.20", "transitive": [ @@ -4037,14 +3810,8 @@ "com.netflix.netflix-commons:netflix-infix", "com.netflix.servo:servo-core", "com.netflix.spectator:spectator-api", - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer", - "com.rabbitmq:amqp-client", - "com.zaxxer:HikariCP", "io.dropwizard.metrics:metrics-core", - "io.micrometer:micrometer-registry-datadog", "io.swagger.core.v3:swagger-core", - "org.apache.kafka:kafka-clients", "org.apache.logging.log4j:log4j-slf4j-impl", "org.redisson:redisson", "org.slf4j:jul-to-slf4j", @@ -4061,7 +3828,6 @@ "org.springdoc:springdoc-openapi-ui": { "locked": "1.6.8", "transitive": [ - "com.netflix.conductor:conductor-postgres-external-storage", "com.netflix.conductor:conductor-rest" ] }, @@ -4103,9 +3869,7 @@ "org.springframework.boot:spring-boot-starter": { "locked": "2.3.12.RELEASE", "transitive": [ - "com.netflix.conductor:conductor-es7-persistence", "org.springframework.boot:spring-boot-starter-actuator", - "org.springframework.boot:spring-boot-starter-jdbc", "org.springframework.boot:spring-boot-starter-json", "org.springframework.boot:spring-boot-starter-validation", "org.springframework.boot:spring-boot-starter-web" @@ -4114,14 +3878,6 @@ "org.springframework.boot:spring-boot-starter-actuator": { "locked": "2.3.12.RELEASE" }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.springframework.boot:spring-boot-starter-json": { "locked": "2.3.12.RELEASE", "transitive": [ @@ -4153,10 
+3909,7 @@ ] }, "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE", - "transitive": [ - "com.netflix.conductor:conductor-es7-persistence" - ] + "locked": "1.2.5.RELEASE" }, "org.springframework:spring-aop": { "locked": "5.2.15.RELEASE", @@ -4170,8 +3923,6 @@ "transitive": [ "org.springframework:spring-aop", "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", "org.springframework:spring-web", "org.springframework:spring-webmvc" ] @@ -4193,8 +3944,6 @@ "org.springframework:spring-beans", "org.springframework:spring-context", "org.springframework:spring-expression", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", "org.springframework:spring-web", "org.springframework:spring-webmvc" ] @@ -4212,18 +3961,6 @@ "org.springframework:spring-core" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, "org.springframework:spring-web": { "locked": "5.2.15.RELEASE", "transitive": [ @@ -4252,12 +3989,6 @@ "org.springdoc:springdoc-openapi-ui" ] }, - "org.xerial.snappy:snappy-java": { - "locked": "1.1.7.3", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.yaml:snakeyaml": { "locked": "1.26", "transitive": [ @@ -4270,6 +4001,7 @@ "redis.clients:jedis": { "locked": "3.3.0", "transitive": [ + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.dyno:dyno-jedis", "org.rarefiedredis.redis:redis-java" @@ -4415,34 +4147,34 @@ "org.springframework.boot:spring-boot-starter-test" ] }, - "com.netflix.conductor:conductor-cassandra-persistence": { + "com.netflix.conductor:conductor-awss3-storage": { "project": true }, - "com.netflix.conductor:conductor-common": { + "com.netflix.conductor:conductor-awssqs-event-queue": { "project": true }, - "com.netflix.conductor:conductor-contribs": { + "com.netflix.conductor:conductor-cassandra-persistence": { "project": true }, - "com.netflix.conductor:conductor-core": { + "com.netflix.conductor:conductor-common": { "project": true }, - "com.netflix.conductor:conductor-es6-persistence": { + "com.netflix.conductor:conductor-core": { "project": true }, - "com.netflix.conductor:conductor-es7-persistence": { + "com.netflix.conductor:conductor-es6-persistence": { "project": true }, "com.netflix.conductor:conductor-grpc-server": { "project": true }, - "com.netflix.conductor:conductor-mysql-persistence": { + "com.netflix.conductor:conductor-http-task": { "project": true }, - "com.netflix.conductor:conductor-postgres-external-storage": { + "com.netflix.conductor:conductor-json-jq-task": { "project": true }, - "com.netflix.conductor:conductor-postgres-persistence": { + "com.netflix.conductor:conductor-redis-concurrency-limit": { "project": true }, "com.netflix.conductor:conductor-redis-lock": { @@ -4454,9 +4186,6 @@ "com.netflix.conductor:conductor-rest": { "project": true }, - "com.rabbitmq:amqp-client": { - "locked": "5.14.0" - }, "com.vaadin.external.google:android-json": { "locked": "0.0.20131108.vaadin1", "transitive": [ @@ -4804,7 +4533,6 @@ "locked": "1.7.30", "transitive": [ "com.jayway.jsonpath:json-path", - "com.rabbitmq:amqp-client", "io.swagger.core.v3:swagger-core", "org.apache.logging.log4j:log4j-slf4j-impl", "org.slf4j:jul-to-slf4j", @@ -4861,7 +4589,6 @@ 
"org.springframework.boot:spring-boot-starter": { "locked": "2.3.12.RELEASE", "transitive": [ - "com.netflix.conductor:conductor-es7-persistence", "org.springframework.boot:spring-boot-starter-actuator", "org.springframework.boot:spring-boot-starter-json", "org.springframework.boot:spring-boot-starter-test", @@ -4916,10 +4643,7 @@ ] }, "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE", - "transitive": [ - "com.netflix.conductor:conductor-es7-persistence" - ] + "locked": "1.2.5.RELEASE" }, "org.springframework:spring-aop": { "locked": "5.2.15.RELEASE", @@ -5052,13 +4776,13 @@ "com.amazonaws:aws-java-sdk-s3": { "locked": "1.11.86", "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-awss3-storage" ] }, "com.amazonaws:aws-java-sdk-sqs": { "locked": "1.11.86", "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-awssqs-event-queue" ] }, "com.amazonaws:jmespath-java": { @@ -5111,8 +4835,6 @@ "com.netflix.archaius:archaius-core", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", "com.netflix.eureka:eureka-client", "org.elasticsearch:elasticsearch-x-content", "org.redisson:redisson", @@ -5134,8 +4856,6 @@ "com.netflix.archaius:archaius-core", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", "com.netflix.dyno-queues:dyno-queues-redis", "com.netflix.eureka:eureka-client", "io.swagger.core.v3:swagger-core", @@ -5198,6 +4918,13 @@ "com.github.vlsi.compactmap:compactmap" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core", + "com.netflix.conductor:conductor-json-jq-task" + ] + }, "com.github.jnr:jffi": { "locked": "1.2.16", "transitive": [ @@ -5229,12 +4956,6 @@ "com.github.jnr:jnr-ffi" ] }, - "com.github.luben:zstd-jni": { - "locked": "1.4.4-7", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "com.github.spullara.mustache.java:compiler": { "locked": "0.9.3", "transitive": [ @@ -5284,6 +5005,7 @@ "com.google.errorprone:error_prone_annotations": { "locked": "2.10.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava", "com.google.protobuf:protobuf-java-util", "io.grpc:grpc-api", @@ -5306,10 +5028,8 @@ "com.google.inject:guice", "com.google.protobuf:protobuf-java-util", "com.netflix.archaius:archaius-core", - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-es6-persistence", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", "com.netflix.netflix-commons:netflix-infix", "com.netflix.servo:servo-core", "io.grpc:grpc-api", @@ -5350,8 +5070,7 @@ "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-grpc", - "io.grpc:grpc-protobuf", - "mysql:mysql-connector-java" + "io.grpc:grpc-protobuf" ] }, "com.google.protobuf:protobuf-java-util": { @@ -5393,38 +5112,43 @@ "com.netflix.conductor:conductor-common" ] }, + "com.netflix.conductor:conductor-awss3-storage": { + "project": true + }, + "com.netflix.conductor:conductor-awssqs-event-queue": { + "project": true + }, 
"com.netflix.conductor:conductor-cassandra-persistence": { "project": true }, "com.netflix.conductor:conductor-common": { "project": true, "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest" ] }, - "com.netflix.conductor:conductor-contribs": { - "project": true - }, "com.netflix.conductor:conductor-core": { "project": true, "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest" @@ -5433,9 +5157,6 @@ "com.netflix.conductor:conductor-es6-persistence": { "project": true }, - "com.netflix.conductor:conductor-es7-persistence": { - "project": true - }, "com.netflix.conductor:conductor-grpc": { "project": true, "transitive": [ @@ -5445,13 +5166,13 @@ "com.netflix.conductor:conductor-grpc-server": { "project": true }, - "com.netflix.conductor:conductor-mysql-persistence": { + "com.netflix.conductor:conductor-http-task": { "project": true }, - "com.netflix.conductor:conductor-postgres-external-storage": { + "com.netflix.conductor:conductor-json-jq-task": { "project": true }, - "com.netflix.conductor:conductor-postgres-persistence": { + "com.netflix.conductor:conductor-redis-concurrency-limit": { "project": true }, "com.netflix.conductor:conductor-redis-lock": { @@ -5558,27 +5279,7 @@ "com.netflix.spectator:spectator-api": { "locked": "0.122.0", "transitive": [ - "com.netflix.conductor:conductor-core", - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer" - ] - }, - "com.netflix.spectator:spectator-reg-metrics3": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, - "com.netflix.spectator:spectator-reg-micrometer": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, - "com.rabbitmq:amqp-client": { - "locked": "5.14.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-core" ] }, "com.spotify:completable-futures": { @@ -5644,12 +5345,6 @@ "org.skyscreamer:jsonassert" ] }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", - 
"transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, "commons-cli:commons-cli": { "locked": "1.4", "transitive": [ @@ -5700,8 +5395,7 @@ "io.dropwizard.metrics:metrics-core": { "locked": "4.1.22", "transitive": [ - "com.datastax.cassandra:cassandra-driver-core", - "com.netflix.spectator:spectator-reg-metrics3" + "com.datastax.cassandra:cassandra-driver-core" ] }, "io.github.classgraph:classgraph": { @@ -5779,27 +5473,9 @@ "io.micrometer:micrometer-core": { "locked": "1.5.14", "transitive": [ - "com.netflix.spectator:spectator-reg-micrometer", - "io.micrometer:micrometer-registry-datadog", - "io.micrometer:micrometer-registry-prometheus", "org.springframework.boot:spring-boot-starter-actuator" ] }, - "io.micrometer:micrometer-registry-datadog": { - "locked": "1.5.14" - }, - "io.micrometer:micrometer-registry-prometheus": { - "locked": "1.5.14", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, - "io.nats:java-nats-streaming": { - "locked": "0.5.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, "io.netty:netty-buffer": { "locked": "4.1.65.Final", "transitive": [ @@ -5934,19 +5610,6 @@ "org.redisson:redisson" ] }, - "io.prometheus:simpleclient": { - "locked": "0.9.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs", - "io.prometheus:simpleclient_common" - ] - }, - "io.prometheus:simpleclient_common": { - "locked": "0.8.1", - "transitive": [ - "io.micrometer:micrometer-registry-prometheus" - ] - }, "io.reactivex.rxjava2:rxjava": { "locked": "2.2.21", "transitive": [ @@ -5956,7 +5619,7 @@ "io.reactivex:rxjava": { "locked": "1.3.8", "transitive": [ - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-core" ] }, @@ -6035,7 +5698,7 @@ "javax.ws.rs:jsr311-api": { "locked": "1.1.1", "transitive": [ - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-http-task", "com.netflix.eureka:eureka-client", "com.sun.jersey:jersey-core" ] @@ -6061,12 +5724,6 @@ "org.junit.vintage:junit-vintage-engine" ] }, - "mysql:mysql-connector-java": { - "locked": "8.0.25", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence" - ] - }, "net.bytebuddy:byte-buddy": { "locked": "1.10.22", "transitive": [ @@ -6101,7 +5758,7 @@ "net.thisptr:jackson-jq": { "locked": "0.0.13", "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-json-jq-task" ] }, "org.antlr:antlr-runtime": { @@ -6126,14 +5783,14 @@ "org.apache.commons:commons-lang3": { "locked": "3.10", "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.dyno:dyno-contrib", "com.netflix.dyno:dyno-core", @@ -6187,26 +5844,21 @@ "org.elasticsearch.client:elasticsearch-rest-client" ] }, - "org.apache.kafka:kafka-clients": { - "locked": "2.5.1", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, 
"org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -6221,16 +5873,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -6243,16 +5896,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -6263,16 +5917,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", 
"com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -6283,16 +5938,17 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest" @@ -6427,8 +6083,8 @@ "org.checkerframework:checker-qual": { "locked": "3.12.0", "transitive": [ - "com.google.guava:guava", - "org.postgresql:postgresql" + "com.github.ben-manes.caffeine:caffeine", + "com.google.guava:guava" ] }, "org.codehaus.jettison:jettison": { @@ -6554,14 +6210,6 @@ "org.elasticsearch:elasticsearch" ] }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.glassfish.jaxb:jaxb-runtime": { "locked": "2.3.3" }, @@ -6717,12 +6365,6 @@ "org.rarefiedredis.redis:redis-java" ] }, - "org.lz4:lz4-java": { - "locked": "1.7.1", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.mockito:mockito-core": { "locked": "3.3.3", "transitive": [ @@ -6784,13 +6426,6 @@ "com.github.jnr:jnr-ffi" ] }, - "org.postgresql:postgresql": { - "locked": "42.2.20", - "transitive": [ - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.projectlombok:lombok": { "locked": "1.18.20", "transitive": [ @@ -6845,14 +6480,8 @@ "com.netflix.netflix-commons:netflix-infix", "com.netflix.servo:servo-core", "com.netflix.spectator:spectator-api", - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer", - "com.rabbitmq:amqp-client", - "com.zaxxer:HikariCP", "io.dropwizard.metrics:metrics-core", - "io.micrometer:micrometer-registry-datadog", "io.swagger.core.v3:swagger-core", - "org.apache.kafka:kafka-clients", "org.apache.logging.log4j:log4j-slf4j-impl", "org.redisson:redisson", "org.slf4j:jul-to-slf4j", @@ -6869,7 +6498,6 @@ "org.springdoc:springdoc-openapi-ui": { "locked": "1.6.8", "transitive": [ - "com.netflix.conductor:conductor-postgres-external-storage", "com.netflix.conductor:conductor-rest" ] }, @@ -6914,9 +6542,7 @@ "org.springframework.boot:spring-boot-starter": { 
"locked": "2.3.12.RELEASE", "transitive": [ - "com.netflix.conductor:conductor-es7-persistence", "org.springframework.boot:spring-boot-starter-actuator", - "org.springframework.boot:spring-boot-starter-jdbc", "org.springframework.boot:spring-boot-starter-json", "org.springframework.boot:spring-boot-starter-test", "org.springframework.boot:spring-boot-starter-validation", @@ -6926,14 +6552,6 @@ "org.springframework.boot:spring-boot-starter-actuator": { "locked": "2.3.12.RELEASE" }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.springframework.boot:spring-boot-starter-json": { "locked": "2.3.12.RELEASE", "transitive": [ @@ -6981,10 +6599,7 @@ ] }, "org.springframework.retry:spring-retry": { - "locked": "1.2.5.RELEASE", - "transitive": [ - "com.netflix.conductor:conductor-es7-persistence" - ] + "locked": "1.2.5.RELEASE" }, "org.springframework:spring-aop": { "locked": "5.2.15.RELEASE", @@ -6998,8 +6613,6 @@ "transitive": [ "org.springframework:spring-aop", "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", "org.springframework:spring-web", "org.springframework:spring-webmvc" ] @@ -7022,9 +6635,7 @@ "org.springframework:spring-beans", "org.springframework:spring-context", "org.springframework:spring-expression", - "org.springframework:spring-jdbc", "org.springframework:spring-test", - "org.springframework:spring-tx", "org.springframework:spring-web", "org.springframework:spring-webmvc" ] @@ -7042,24 +6653,12 @@ "org.springframework:spring-core" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, "org.springframework:spring-test": { "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, "org.springframework:spring-web": { "locked": "5.2.15.RELEASE", "transitive": [ @@ -7088,12 +6687,6 @@ "org.springdoc:springdoc-openapi-ui" ] }, - "org.xerial.snappy:snappy-java": { - "locked": "1.1.7.3", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.xmlunit:xmlunit-core": { "locked": "2.7.0", "transitive": [ @@ -7112,6 +6705,7 @@ "redis.clients:jedis": { "locked": "3.3.0", "transitive": [ + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.dyno:dyno-jedis", "org.rarefiedredis.redis:redis-java" diff --git a/settings.gradle b/settings.gradle index 24f40da2f6..a3a92ae41b 100644 --- a/settings.gradle +++ b/settings.gradle @@ -35,26 +35,24 @@ include 'annotations-processor' include 'server' include 'common' include 'core' -include 'contribs' include 'client' include 'client-spring' include 'cassandra-persistence' include 'redis-persistence' -include 'mysql-persistence' -include 'postgres-persistence' include 'es6-persistence' -include 'es7-persistence' include 'redis-lock' -include 'zookeeper-lock' -include 'azureblob-storage' -include 'postgres-external-storage' +include 'awss3-storage' +include 'awssqs-event-queue' include 'redis-concurrency-limit' +include 'json-jq-task' +include 'http-task' + include 'rest' include 'grpc' include 'grpc-server' 
diff --git a/test-harness/build.gradle b/test-harness/build.gradle index eaf4016e3b..2a249ac0b6 100644 --- a/test-harness/build.gradle +++ b/test-harness/build.gradle @@ -5,15 +5,14 @@ dependencies { testImplementation project(':conductor-common') testImplementation project(':conductor-rest') testImplementation project(':conductor-core') - testImplementation project(':conductor-contribs') testImplementation project(':conductor-redis-persistence') - testImplementation project(':conductor-mysql-persistence') - testImplementation project(':conductor-postgres-persistence') testImplementation project(':conductor-cassandra-persistence') testImplementation project(':conductor-es6-persistence') testImplementation project(':conductor-grpc-server') testImplementation project(':conductor-client') testImplementation project(':conductor-grpc-client') + testImplementation project(':conductor-json-jq-task') + testImplementation project(':conductor-http-task') testImplementation "org.springframework.retry:spring-retry" @@ -37,13 +36,4 @@ dependencies { testImplementation "org.elasticsearch.client:elasticsearch-rest-high-level-client" testImplementation "org.testcontainers:elasticsearch:${revTestContainer}" - testImplementation "org.testcontainers:mysql:${revTestContainer}" - testImplementation "org.testcontainers:postgresql:${revTestContainer}" - testImplementation(group: 'com.rabbitmq', name: 'amqp-client'){ version{require "${revAmqpClient}"}} -} - -test { - testLogging { - exceptionFormat = 'full' - } } diff --git a/test-harness/dependencies.lock b/test-harness/dependencies.lock index b1d373509c..98b959ac88 100644 --- a/test-harness/dependencies.lock +++ b/test-harness/dependencies.lock @@ -239,9 +239,6 @@ "com.netflix.conductor:conductor-common": { "project": true }, - "com.netflix.conductor:conductor-contribs": { - "project": true - }, "com.netflix.conductor:conductor-core": { "project": true }, @@ -254,10 +251,10 @@ "com.netflix.conductor:conductor-grpc-server": { "project": true }, - "com.netflix.conductor:conductor-mysql-persistence": { + "com.netflix.conductor:conductor-http-task": { "project": true }, - "com.netflix.conductor:conductor-postgres-persistence": { + "com.netflix.conductor:conductor-json-jq-task": { "project": true }, "com.netflix.conductor:conductor-redis-persistence": { @@ -338,9 +335,6 @@ "com.netflix.dyno:dyno-contrib" ] }, - "com.rabbitmq:amqp-client": { - "locked": "5.14.0" - }, "com.sun.jersey:jersey-core": { "locked": "1.19.1", "transitive": [ @@ -1117,7 +1111,6 @@ "com.netflix.dyno:dyno-jedis", "com.netflix.dyno:dyno-memcache", "com.netflix.dyno:dyno-recipes", - "com.rabbitmq:amqp-client", "org.apache.logging.log4j:log4j-slf4j-impl", "org.slf4j:jul-to-slf4j", "org.testcontainers:testcontainers", @@ -1239,32 +1232,12 @@ "org.springframework:spring-web": { "locked": "5.2.15.RELEASE" }, - "org.testcontainers:database-commons": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:jdbc" - ] - }, "org.testcontainers:elasticsearch": { "locked": "1.15.3" }, - "org.testcontainers:jdbc": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:mysql", - "org.testcontainers:postgresql" - ] - }, - "org.testcontainers:mysql": { - "locked": "1.15.3" - }, - "org.testcontainers:postgresql": { - "locked": "1.15.3" - }, "org.testcontainers:testcontainers": { "locked": "1.15.3", "transitive": [ - "org.testcontainers:database-commons", "org.testcontainers:elasticsearch" ] }, @@ -1320,13 +1293,13 @@ "com.amazonaws:aws-java-sdk-s3": { "locked": "1.11.86", "transitive": [ - 
"com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-awss3-storage" ] }, "com.amazonaws:aws-java-sdk-sqs": { "locked": "1.11.86", "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-awssqs-event-queue" ] }, "com.amazonaws:jmespath-java": { @@ -1389,8 +1362,6 @@ "com.netflix.archaius:archaius-core", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", "com.netflix.eureka:eureka-client", "org.elasticsearch:elasticsearch-x-content", "org.redisson:redisson", @@ -1414,8 +1385,6 @@ "com.netflix.archaius:archaius-core", "com.netflix.conductor:conductor-common", "com.netflix.conductor:conductor-core", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", "com.netflix.dyno-queues:dyno-queues-redis", "com.netflix.eureka:eureka-client", "io.swagger.core.v3:swagger-core", @@ -1497,6 +1466,13 @@ "com.github.vlsi.compactmap:compactmap" ] }, + "com.github.ben-manes.caffeine:caffeine": { + "locked": "2.8.8", + "transitive": [ + "com.netflix.conductor:conductor-core", + "com.netflix.conductor:conductor-json-jq-task" + ] + }, "com.github.docker-java:docker-java-api": { "locked": "3.2.8", "transitive": [ @@ -1546,12 +1522,6 @@ "com.github.jnr:jnr-ffi" ] }, - "com.github.luben:zstd-jni": { - "locked": "1.4.4-7", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "com.github.spullara.mustache.java:compiler": { "locked": "0.9.3", "transitive": [ @@ -1602,6 +1572,7 @@ "com.google.errorprone:error_prone_annotations": { "locked": "2.10.0", "transitive": [ + "com.github.ben-manes.caffeine:caffeine", "com.google.guava:guava", "com.google.protobuf:protobuf-java-util", "io.grpc:grpc-api", @@ -1624,11 +1595,9 @@ "com.google.inject:guice", "com.google.protobuf:protobuf-java-util", "com.netflix.archaius:archaius-core", - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc-client", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", "com.netflix.netflix-commons:netflix-infix", "com.netflix.servo:servo-core", "io.grpc:grpc-api", @@ -1670,8 +1639,7 @@ "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-client", - "io.grpc:grpc-protobuf", - "mysql:mysql-connector-java" + "io.grpc:grpc-protobuf" ] }, "com.google.protobuf:protobuf-java-util": { @@ -1713,6 +1681,18 @@ "com.netflix.conductor:conductor-common" ] }, + "com.netflix.conductor:conductor-awss3-storage": { + "project": true, + "transitive": [ + "com.netflix.conductor:conductor-server" + ] + }, + "com.netflix.conductor:conductor-awssqs-event-queue": { + "project": true, + "transitive": [ + "com.netflix.conductor:conductor-server" + ] + }, "com.netflix.conductor:conductor-cassandra-persistence": { "project": true, "transitive": [ @@ -1725,37 +1705,33 @@ "com.netflix.conductor:conductor-common": { "project": true, "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-client", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", 
"com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-client", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest" ] }, - "com.netflix.conductor:conductor-contribs": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-server" - ] - }, "com.netflix.conductor:conductor-core": { "project": true, "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -1768,12 +1744,6 @@ "com.netflix.conductor:conductor-server" ] }, - "com.netflix.conductor:conductor-es7-persistence": { - "project": true, - "transitive": [ - "com.netflix.conductor:conductor-server" - ] - }, "com.netflix.conductor:conductor-grpc": { "project": true, "transitive": [ @@ -1790,19 +1760,19 @@ "com.netflix.conductor:conductor-server" ] }, - "com.netflix.conductor:conductor-mysql-persistence": { + "com.netflix.conductor:conductor-http-task": { "project": true, "transitive": [ "com.netflix.conductor:conductor-server" ] }, - "com.netflix.conductor:conductor-postgres-external-storage": { + "com.netflix.conductor:conductor-json-jq-task": { "project": true, "transitive": [ "com.netflix.conductor:conductor-server" ] }, - "com.netflix.conductor:conductor-postgres-persistence": { + "com.netflix.conductor:conductor-redis-concurrency-limit": { "project": true, "transitive": [ "com.netflix.conductor:conductor-server" @@ -1926,29 +1896,7 @@ "locked": "0.122.0", "transitive": [ "com.netflix.conductor:conductor-client", - "com.netflix.conductor:conductor-core", - "com.netflix.spectator:spectator-reg-metrics3", - "com.netflix.spectator:spectator-reg-micrometer" - ] - }, - "com.netflix.spectator:spectator-reg-metrics3": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, - "com.netflix.spectator:spectator-reg-micrometer": { - "locked": "0.122.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs", - "com.netflix.conductor:conductor-server" - ] - }, - "com.rabbitmq:amqp-client": { - "locked": "5.14.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs", - "com.netflix.conductor:conductor-server" + "com.netflix.conductor:conductor-core" ] }, "com.spotify:completable-futures": { @@ -2021,12 +1969,6 @@ "org.skyscreamer:jsonassert" ] }, - "com.zaxxer:HikariCP": { - "locked": "3.4.5", - "transitive": [ - 
"org.springframework.boot:spring-boot-starter-jdbc" - ] - }, "commons-cli:commons-cli": { "locked": "1.4", "transitive": [ @@ -2087,8 +2029,7 @@ "io.dropwizard.metrics:metrics-core": { "locked": "4.1.22", "transitive": [ - "com.datastax.cassandra:cassandra-driver-core", - "com.netflix.spectator:spectator-reg-metrics3" + "com.datastax.cassandra:cassandra-driver-core" ] }, "io.github.classgraph:classgraph": { @@ -2163,30 +2104,9 @@ "io.micrometer:micrometer-core": { "locked": "1.5.14", "transitive": [ - "com.netflix.spectator:spectator-reg-micrometer", - "io.micrometer:micrometer-registry-datadog", - "io.micrometer:micrometer-registry-prometheus", "org.springframework.boot:spring-boot-starter-actuator" ] }, - "io.micrometer:micrometer-registry-datadog": { - "locked": "1.5.14", - "transitive": [ - "com.netflix.conductor:conductor-server" - ] - }, - "io.micrometer:micrometer-registry-prometheus": { - "locked": "1.5.14", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, - "io.nats:java-nats-streaming": { - "locked": "0.5.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, "io.netty:netty-buffer": { "locked": "4.1.65.Final", "transitive": [ @@ -2321,19 +2241,6 @@ "org.redisson:redisson" ] }, - "io.prometheus:simpleclient": { - "locked": "0.9.0", - "transitive": [ - "com.netflix.conductor:conductor-contribs", - "io.prometheus:simpleclient_common" - ] - }, - "io.prometheus:simpleclient_common": { - "locked": "0.8.1", - "transitive": [ - "io.micrometer:micrometer-registry-prometheus" - ] - }, "io.reactivex.rxjava2:rxjava": { "locked": "2.2.21", "transitive": [ @@ -2343,7 +2250,7 @@ "io.reactivex:rxjava": { "locked": "1.3.8", "transitive": [ - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-core" ] }, @@ -2424,7 +2331,7 @@ "javax.ws.rs:jsr311-api": { "locked": "1.1.1", "transitive": [ - "com.netflix.conductor:conductor-contribs", + "com.netflix.conductor:conductor-http-task", "com.netflix.eureka:eureka-client", "com.sun.jersey:jersey-core" ] @@ -2458,12 +2365,6 @@ "org.testcontainers:testcontainers" ] }, - "mysql:mysql-connector-java": { - "locked": "8.0.25", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence" - ] - }, "net.bytebuddy:byte-buddy": { "locked": "1.10.22", "transitive": [ @@ -2505,7 +2406,7 @@ "net.thisptr:jackson-jq": { "locked": "0.0.13", "transitive": [ - "com.netflix.conductor:conductor-contribs" + "com.netflix.conductor:conductor-json-jq-task" ] }, "org.antlr:antlr-runtime": { @@ -2562,16 +2463,16 @@ "org.apache.commons:commons-lang3": { "locked": "3.10", "transitive": [ + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-client", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc-client", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.dyno:dyno-contrib", "com.netflix.dyno:dyno-core", @@ -2625,28 +2526,23 @@ "org.elasticsearch.client:elasticsearch-rest-client" ] }, - "org.apache.kafka:kafka-clients": 
{ - "locked": "2.5.1", - "transitive": [ - "com.netflix.conductor:conductor-contribs" - ] - }, "org.apache.logging.log4j:log4j-api": { "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-client", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-client", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -2662,18 +2558,19 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-client", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-client", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -2687,18 +2584,19 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-client", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-client", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -2710,18 +2608,19 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + 
"com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-client", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-client", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -2733,18 +2632,19 @@ "locked": "2.17.1", "transitive": [ "com.netflix.conductor:conductor-annotations", + "com.netflix.conductor:conductor-awss3-storage", + "com.netflix.conductor:conductor-awssqs-event-queue", "com.netflix.conductor:conductor-cassandra-persistence", "com.netflix.conductor:conductor-client", "com.netflix.conductor:conductor-common", - "com.netflix.conductor:conductor-contribs", "com.netflix.conductor:conductor-core", "com.netflix.conductor:conductor-es6-persistence", "com.netflix.conductor:conductor-grpc", "com.netflix.conductor:conductor-grpc-client", "com.netflix.conductor:conductor-grpc-server", - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence", + "com.netflix.conductor:conductor-http-task", + "com.netflix.conductor:conductor-json-jq-task", + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-lock", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.conductor:conductor-rest", @@ -2872,10 +2772,10 @@ ] }, "org.checkerframework:checker-qual": { - "locked": "3.5.0", + "locked": "3.8.0", "transitive": [ - "com.google.guava:guava", - "org.postgresql:postgresql" + "com.github.ben-manes.caffeine:caffeine", + "com.google.guava:guava" ] }, "org.codehaus.groovy:groovy": { @@ -3187,14 +3087,6 @@ "org.elasticsearch:elasticsearch" ] }, - "org.flywaydb:flyway-core": { - "locked": "6.4.4", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.glassfish.jaxb:jaxb-runtime": { "locked": "2.3.4", "transitive": [ @@ -3363,12 +3255,6 @@ "org.rarefiedredis.redis:redis-java" ] }, - "org.lz4:lz4-java": { - "locked": "1.7.1", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.mockito:mockito-core": { "locked": "3.3.3", "transitive": [ @@ -3430,13 +3316,6 @@ "com.github.jnr:jnr-ffi" ] }, - "org.postgresql:postgresql": { - "locked": "42.2.20", - "transitive": [ - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.projectlombok:lombok": { "locked": "1.18.20", "transitive": [ @@ -3507,14 +3386,8 @@ "com.netflix.netflix-commons:netflix-infix", "com.netflix.servo:servo-core", "com.netflix.spectator:spectator-api", - "com.netflix.spectator:spectator-reg-metrics3", - 
"com.netflix.spectator:spectator-reg-micrometer", - "com.rabbitmq:amqp-client", - "com.zaxxer:HikariCP", "io.dropwizard.metrics:metrics-core", - "io.micrometer:micrometer-registry-datadog", "io.swagger.core.v3:swagger-core", - "org.apache.kafka:kafka-clients", "org.apache.logging.log4j:log4j-slf4j-impl", "org.redisson:redisson", "org.slf4j:jul-to-slf4j", @@ -3541,7 +3414,6 @@ "org.springdoc:springdoc-openapi-ui": { "locked": "1.6.8", "transitive": [ - "com.netflix.conductor:conductor-postgres-external-storage", "com.netflix.conductor:conductor-rest", "com.netflix.conductor:conductor-server" ] @@ -3587,10 +3459,8 @@ "org.springframework.boot:spring-boot-starter": { "locked": "2.3.12.RELEASE", "transitive": [ - "com.netflix.conductor:conductor-es7-persistence", "com.netflix.conductor:conductor-server", "org.springframework.boot:spring-boot-starter-actuator", - "org.springframework.boot:spring-boot-starter-jdbc", "org.springframework.boot:spring-boot-starter-json", "org.springframework.boot:spring-boot-starter-test", "org.springframework.boot:spring-boot-starter-validation", @@ -3603,14 +3473,6 @@ "com.netflix.conductor:conductor-server" ] }, - "org.springframework.boot:spring-boot-starter-jdbc": { - "locked": "2.3.12.RELEASE", - "transitive": [ - "com.netflix.conductor:conductor-mysql-persistence", - "com.netflix.conductor:conductor-postgres-external-storage", - "com.netflix.conductor:conductor-postgres-persistence" - ] - }, "org.springframework.boot:spring-boot-starter-json": { "locked": "2.3.12.RELEASE", "transitive": [ @@ -3667,7 +3529,6 @@ "org.springframework.retry:spring-retry": { "locked": "1.2.5.RELEASE", "transitive": [ - "com.netflix.conductor:conductor-es7-persistence", "com.netflix.conductor:conductor-server" ] }, @@ -3683,8 +3544,6 @@ "transitive": [ "org.springframework:spring-aop", "org.springframework:spring-context", - "org.springframework:spring-jdbc", - "org.springframework:spring-tx", "org.springframework:spring-web", "org.springframework:spring-webmvc" ] @@ -3707,9 +3566,7 @@ "org.springframework:spring-beans", "org.springframework:spring-context", "org.springframework:spring-expression", - "org.springframework:spring-jdbc", "org.springframework:spring-test", - "org.springframework:spring-tx", "org.springframework:spring-web", "org.springframework:spring-webmvc" ] @@ -3727,24 +3584,12 @@ "org.springframework:spring-core" ] }, - "org.springframework:spring-jdbc": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework.boot:spring-boot-starter-jdbc" - ] - }, "org.springframework:spring-test": { "locked": "5.2.15.RELEASE", "transitive": [ "org.springframework.boot:spring-boot-starter-test" ] }, - "org.springframework:spring-tx": { - "locked": "5.2.15.RELEASE", - "transitive": [ - "org.springframework:spring-jdbc" - ] - }, "org.springframework:spring-web": { "locked": "5.2.15.RELEASE", "transitive": [ @@ -3761,32 +3606,12 @@ "org.springframework.boot:spring-boot-starter-web" ] }, - "org.testcontainers:database-commons": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:jdbc" - ] - }, "org.testcontainers:elasticsearch": { "locked": "1.15.3" }, - "org.testcontainers:jdbc": { - "locked": "1.15.3", - "transitive": [ - "org.testcontainers:mysql", - "org.testcontainers:postgresql" - ] - }, - "org.testcontainers:mysql": { - "locked": "1.15.3" - }, - "org.testcontainers:postgresql": { - "locked": "1.15.3" - }, "org.testcontainers:testcontainers": { "locked": "1.15.3", "transitive": [ - "org.testcontainers:database-commons", 
"org.testcontainers:elasticsearch" ] }, @@ -3808,12 +3633,6 @@ "org.springdoc:springdoc-openapi-ui" ] }, - "org.xerial.snappy:snappy-java": { - "locked": "1.1.7.3", - "transitive": [ - "org.apache.kafka:kafka-clients" - ] - }, "org.xmlunit:xmlunit-core": { "locked": "2.7.0", "transitive": [ @@ -3832,6 +3651,7 @@ "redis.clients:jedis": { "locked": "3.3.0", "transitive": [ + "com.netflix.conductor:conductor-redis-concurrency-limit", "com.netflix.conductor:conductor-redis-persistence", "com.netflix.dyno:dyno-jedis", "org.rarefiedredis.redis:redis-java" diff --git a/test-harness/src/test/groovy/com/netflix/conductor/test/integration/KafkaPublishTaskSpec.groovy b/test-harness/src/test/groovy/com/netflix/conductor/test/integration/KafkaPublishTaskSpec.groovy deleted file mode 100644 index fcb860a28d..0000000000 --- a/test-harness/src/test/groovy/com/netflix/conductor/test/integration/KafkaPublishTaskSpec.groovy +++ /dev/null @@ -1,178 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.test.integration - -import org.springframework.beans.factory.annotation.Autowired - -import com.netflix.conductor.common.metadata.tasks.TaskDef -import com.netflix.conductor.common.metadata.tasks.TaskResult -import com.netflix.conductor.common.metadata.tasks.TaskType -import com.netflix.conductor.common.metadata.workflow.WorkflowDef -import com.netflix.conductor.common.metadata.workflow.WorkflowTask -import com.netflix.conductor.common.run.Workflow -import com.netflix.conductor.test.base.AbstractSpecification - -import com.fasterxml.jackson.databind.ObjectMapper -import spock.lang.Shared - -class KafkaPublishTaskSpec extends AbstractSpecification { - - @Autowired - ObjectMapper objectMapper - - @Shared - def isWorkflowRegistered = false - - def kafkaInput = ['requestDetails': ['key1': 'value1', 'key2': 42], - 'path1' : 'file://path1', - 'path2' : 'file://path2', - 'outputPath' : 's3://bucket/outputPath' - ] - - def expectedTaskInput = "{\"kafka_request\":{\"topic\":\"test_kafka_topic\",\"bootStrapServers\":\"localhost:9092\",\"value\":{\"requestDetails\":{\"key1\":\"value1\",\"key2\":42},\"outputPath\":\"s3://bucket/outputPath\",\"inputPaths\":[\"file://path1\",\"file://path2\"]}}}" - - def setup() { - if (!isWorkflowRegistered) { - registerKafkaWorkflow() - isWorkflowRegistered = true - } - } - - def "Test the kafka template usage failure case"() { - - given: "Start a workflow based on the registered workflow" - def workflowInstanceId = workflowExecutor.startWorkflow("template_kafka_workflow", 1, - "testTaskDefTemplate", kafkaInput, - null, null, null) - - and: "Get the workflow based on the Id that is being executed" - def workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true) - def task = workflow.tasks.get(0) - def taskInput = task.inputData - - when: "Ensure that the task is pollable and fail the task" - def polledTask = workflowExecutionService.poll('KAFKA_PUBLISH', 'test') - workflowExecutionService.ackTaskReceived(polledTask.taskId) - def taskResult = new TaskResult(polledTask) - taskResult.status = TaskResult.Status.FAILED - taskResult.reasonForIncompletion = 'NON TRANSIENT ERROR OCCURRED: An integration point required to complete the task is down' - taskResult.addOutputData("TERMINAL_ERROR", "Integration endpoint down: FOOBAR") - taskResult.addOutputData("ErrorMessage", "There was a terminal error") - workflowExecutionService.updateTask(taskResult) - - and: "Then run a decide to move the workflow forward" - workflowExecutor.decide(workflowInstanceId) - - and: "Get the updated workflow after the task result has been updated" - def updatedWorkflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true) - - then: "Check that the workflow is created and is not terminal" - workflowInstanceId - workflow - !workflow.getStatus().isTerminal() - !workflow.getReasonForIncompletion() - - and: "Check if the input of the next task to be polled is as expected for a kafka task" - taskInput - taskInput.containsKey('kafka_request') - taskInput['kafka_request'] instanceof Map - objectMapper.writeValueAsString(taskInput) == expectedTaskInput - - and: "Polled task is not null and the workflowInstanceId of the 
task is same as the workflow created initially" - polledTask - polledTask.workflowInstanceId == workflowInstanceId - - and: "The updated workflow is in a failed state" - updatedWorkflow - updatedWorkflow.status == Workflow.WorkflowStatus.FAILED - } - - def "Test the kafka template usage success case"() { - - given: "Start a workflow based on the registered kafka workflow" - def workflowInstanceId = workflowExecutor.startWorkflow("template_kafka_workflow", 1, - "testTaskDefTemplate", kafkaInput, - null, null, null) - - and: "Get the workflow based on the Id that is being executed" - def workflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true) - def task = workflow.tasks.get(0) - def taskInput = task.inputData - - when: "Ensure that the task is pollable and complete the task" - def polledTask = workflowExecutionService.poll('KAFKA_PUBLISH', 'test') - workflowExecutionService.ackTaskReceived(polledTask.taskId) - def taskResult = new TaskResult(polledTask) - taskResult.setStatus(TaskResult.Status.COMPLETED) - workflowExecutionService.updateTask(taskResult) - - and: "Then run a decide to move the workflow forward" - workflowExecutor.decide(workflowInstanceId) - - and: "Get the updated workflow after the task result has been updated" - def updatedWorkflow = workflowExecutionService.getExecutionStatus(workflowInstanceId, true) - - then: "Check that the workflow is created and is not terminal" - workflowInstanceId - workflow - !workflow.getStatus().isTerminal() - !workflow.getReasonForIncompletion() - - and: "Check if the input of the next task to be polled is as expected for a kafka task" - taskInput - taskInput.containsKey('kafka_request') - taskInput['kafka_request'] instanceof Map - objectMapper.writeValueAsString(taskInput) == expectedTaskInput - - and: "Polled task is not null and the workflowInstanceId of the task is same as the workflow created initially" - polledTask - polledTask.workflowInstanceId == workflowInstanceId - - and: "The updated workflow is complete" - updatedWorkflow - updatedWorkflow.status == Workflow.WorkflowStatus.COMPLETED - - } - - def registerKafkaWorkflow() { - System.setProperty("STACK_KAFKA", "test_kafka_topic") - TaskDef templatedTask = new TaskDef() - templatedTask.name = "templated_kafka_task" - templatedTask.retryCount = 0 - templatedTask.ownerEmail = "test@harness.com" - - def kafkaRequest = new HashMap<>() - kafkaRequest["topic"] = '${STACK_KAFKA}' - kafkaRequest["bootStrapServers"] = "localhost:9092" - - def value = new HashMap<>() - value["inputPaths"] = ['${workflow.input.path1}', '${workflow.input.path2}'] - value["requestDetails"] = '${workflow.input.requestDetails}' - value["outputPath"] = '${workflow.input.outputPath}' - kafkaRequest["value"] = value - - templatedTask.inputTemplate["kafka_request"] = kafkaRequest - metadataService.registerTaskDef([templatedTask]) - - WorkflowDef templateWf = new WorkflowDef() - templateWf.name = "template_kafka_workflow" - WorkflowTask wft = new WorkflowTask() - wft.name = templatedTask.name - wft.workflowTaskType = TaskType.KAFKA_PUBLISH - wft.taskReferenceName = "t0" - templateWf.tasks.add(wft) - templateWf.schemaVersion = 2 - templateWf.ownerEmail = "test@harness.com" - metadataService.registerWorkflowDef(templateWf) - } -} diff --git a/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/GrpcEndToEndTest.java b/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/GrpcEndToEndTest.java index 5041524ea1..0353cd0486 100644 --- 
a/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/GrpcEndToEndTest.java +++ b/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/GrpcEndToEndTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2020 Netflix, Inc. + * Copyright 2022 Netflix, Inc. *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at diff --git a/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/mysql/MySQLGrpcEndToEndTest.java b/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/mysql/MySQLGrpcEndToEndTest.java deleted file mode 100644 index 23d3854062..0000000000 --- a/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/mysql/MySQLGrpcEndToEndTest.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.test.integration.grpc.mysql; - -import org.junit.Before; -import org.junit.runner.RunWith; -import org.springframework.test.context.TestPropertySource; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.conductor.client.grpc.EventClient; -import com.netflix.conductor.client.grpc.MetadataClient; -import com.netflix.conductor.client.grpc.TaskClient; -import com.netflix.conductor.client.grpc.WorkflowClient; -import com.netflix.conductor.test.integration.grpc.AbstractGrpcEndToEndTest; - -@RunWith(SpringRunner.class) -@TestPropertySource( - properties = { - "conductor.db.type=mysql", - "conductor.grpc-server.port=8094", - "spring.datasource.url=jdbc:tc:mysql:///conductor", // "tc" prefix starts the MySql - // container - "spring.datasource.username=root", - "spring.datasource.password=root", - "spring.datasource.hikari.maximum-pool-size=8", - "spring.datasource.hikari.minimum-idle=300000" - }) -public class MySQLGrpcEndToEndTest extends AbstractGrpcEndToEndTest { - - @Before - public void init() { - taskClient = new TaskClient("localhost", 8094); - workflowClient = new WorkflowClient("localhost", 8094); - metadataClient = new MetadataClient("localhost", 8094); - eventClient = new EventClient("localhost", 8094); - } -} diff --git a/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/postgres/PostgresGrpcEndToEndTest.java b/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/postgres/PostgresGrpcEndToEndTest.java deleted file mode 100644 index 108cf09a8f..0000000000 --- a/test-harness/src/test/java/com/netflix/conductor/test/integration/grpc/postgres/PostgresGrpcEndToEndTest.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.test.integration.grpc.postgres; - -import org.junit.Before; -import org.junit.runner.RunWith; -import org.springframework.test.context.TestPropertySource; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.conductor.client.grpc.EventClient; -import com.netflix.conductor.client.grpc.MetadataClient; -import com.netflix.conductor.client.grpc.TaskClient; -import com.netflix.conductor.client.grpc.WorkflowClient; -import com.netflix.conductor.test.integration.grpc.AbstractGrpcEndToEndTest; - -@RunWith(SpringRunner.class) -@TestPropertySource( - properties = { - "conductor.db.type=postgres", - "conductor.grpc-server.port=8098", - "spring.datasource.url=jdbc:tc:postgresql:///conductor", // "tc" prefix starts the - // Postgres container - "spring.datasource.username=postgres", - "spring.datasource.password=postgres", - "spring.datasource.hikari.maximum-pool-size=8", - "spring.datasource.hikari.minimum-idle=300000" - }) -public class PostgresGrpcEndToEndTest extends AbstractGrpcEndToEndTest { - - @Before - public void init() { - taskClient = new TaskClient("localhost", 8098); - workflowClient = new WorkflowClient("localhost", 8098); - metadataClient = new MetadataClient("localhost", 8098); - eventClient = new EventClient("localhost", 8098); - } -} diff --git a/test-harness/src/test/java/com/netflix/conductor/test/integration/http/HttpEndToEndTest.java b/test-harness/src/test/java/com/netflix/conductor/test/integration/http/HttpEndToEndTest.java index 96745a14f3..0dded2cddf 100644 --- a/test-harness/src/test/java/com/netflix/conductor/test/integration/http/HttpEndToEndTest.java +++ b/test-harness/src/test/java/com/netflix/conductor/test/integration/http/HttpEndToEndTest.java @@ -1,5 +1,5 @@ /* - * Copyright 2020 Netflix, Inc. + * Copyright 2022 Netflix, Inc. *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at diff --git a/test-harness/src/test/java/com/netflix/conductor/test/listener/WorkflowStatusPublisherIntegrationTest.java b/test-harness/src/test/java/com/netflix/conductor/test/listener/WorkflowStatusPublisherIntegrationTest.java deleted file mode 100644 index 30e8516e3f..0000000000 --- a/test-harness/src/test/java/com/netflix/conductor/test/listener/WorkflowStatusPublisherIntegrationTest.java +++ /dev/null @@ -1,210 +0,0 @@ -/* - * Copyright 2022 Netflix, Inc. - *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.test.listener; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.springframework.beans.factory.annotation.Autowired; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.TestPropertySource; -import org.springframework.test.context.junit4.SpringRunner; - -import com.netflix.conductor.common.metadata.tasks.Task; -import com.netflix.conductor.common.metadata.tasks.TaskDef; -import com.netflix.conductor.common.metadata.tasks.TaskResult; -import com.netflix.conductor.common.metadata.workflow.WorkflowDef; -import com.netflix.conductor.common.metadata.workflow.WorkflowTask; -import com.netflix.conductor.common.run.Workflow; -import com.netflix.conductor.common.run.WorkflowSummary; -import com.netflix.conductor.core.events.queue.Message; -import com.netflix.conductor.core.execution.WorkflowExecutor; -import com.netflix.conductor.dao.QueueDAO; -import com.netflix.conductor.model.WorkflowModel; -import com.netflix.conductor.service.ExecutionService; -import com.netflix.conductor.service.MetadataService; - -import com.fasterxml.jackson.databind.ObjectMapper; - -import static com.netflix.conductor.common.metadata.tasks.Task.Status.COMPLETED; - -import static org.junit.Assert.assertEquals; - -@RunWith(SpringRunner.class) -@SpringBootTest( - properties = { - "conductor.workflow-status-listener.type=queue_publisher", - "conductor.workflow-status-listener.queue-publisher.successQueue=dummy", - "conductor.workflow-status-listener.queue-publisher.failureQueue=dummy", - "conductor.workflow-status-listener.queue-publisher.finalizeQueue=final" - }) -@TestPropertySource(locations = "classpath:application-integrationtest.properties") -public class WorkflowStatusPublisherIntegrationTest { - - private final String CALLBACK_QUEUE = "dummy"; - private final String FINALIZED_QUEUE = "final"; - private static final String LINEAR_WORKFLOW_T1_T2 = "junit_test_wf"; - private static final int WORKFLOW_VERSION = 1; - private static final String INCOMPLETION_REASON = "test reason"; - private static final String DEFAULT_OWNER_EMAIL = "test@harness.com"; - - @Autowired private ObjectMapper objectMapper; - - @Autowired QueueDAO queueDAO; - - @Autowired protected MetadataService metadataService; - - @Autowired protected ExecutionService workflowExecutionService; - - @Autowired protected WorkflowExecutor workflowExecutor; - - @Before - public void setUp() { - TaskDef taskDef = new TaskDef(); - taskDef.setName("junit_task_1"); - taskDef.setTimeoutSeconds(120); - taskDef.setResponseTimeoutSeconds(120); - taskDef.setRetryCount(1); - taskDef.setOwnerEmail(DEFAULT_OWNER_EMAIL); - metadataService.registerTaskDef(Collections.singletonList(taskDef)); - } - - @After - public void cleanUp() { - List<String> workflows = - metadataService.getWorkflowDefs().stream() - .map(WorkflowDef::getName) - .collect(Collectors.toList()); - for (String wfName : workflows) { - List<String> running = -
workflowExecutionService.getRunningWorkflows(wfName, WORKFLOW_VERSION); - for (String wfid : running) { - workflowExecutor.terminateWorkflow(wfid, "cleanup"); - } - } - queueDAO.queuesDetail().keySet().forEach(queueDAO::flush); - } - - @Test - public void testListenerOnTerminatedWorkflow() throws IOException { - String id = - startOrLoadWorkflowExecution( - LINEAR_WORKFLOW_T1_T2, - 1, - "testWorkflowTerminatedListener", - new HashMap<>()); - workflowExecutor.terminateWorkflow(id, INCOMPLETION_REASON); - - List callbackMessages = queueDAO.pollMessages(CALLBACK_QUEUE, 1, 200); - queueDAO.ack(CALLBACK_QUEUE, callbackMessages.get(0).getId()); - - WorkflowSummary payload = - objectMapper.readValue(callbackMessages.get(0).getPayload(), WorkflowSummary.class); - assertEquals(id, callbackMessages.get(0).getId()); - assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType()); - assertEquals("testWorkflowTerminatedListener", payload.getCorrelationId()); - assertEquals(Workflow.WorkflowStatus.TERMINATED, payload.getStatus()); - assertEquals(INCOMPLETION_REASON, payload.getReasonForIncompletion()); - - // check finalized queue - callbackMessages = queueDAO.pollMessages(FINALIZED_QUEUE, 1, 200); - queueDAO.ack(CALLBACK_QUEUE, callbackMessages.get(0).getId()); - - payload = - objectMapper.readValue(callbackMessages.get(0).getPayload(), WorkflowSummary.class); - assertEquals(id, callbackMessages.get(0).getId()); - assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType()); - assertEquals("testWorkflowTerminatedListener", payload.getCorrelationId()); - assertEquals(Workflow.WorkflowStatus.TERMINATED, payload.getStatus()); - assertEquals(INCOMPLETION_REASON, payload.getReasonForIncompletion()); - } - - @Test - public void testListenerOnCompletedWorkflow() throws IOException, InterruptedException { - WorkflowDef workflowDef = new WorkflowDef(); - workflowDef.setName(LINEAR_WORKFLOW_T1_T2); - workflowDef.setDescription(workflowDef.getName()); - workflowDef.setVersion(WORKFLOW_VERSION); - workflowDef.setSchemaVersion(2); - workflowDef.setOwnerEmail(DEFAULT_OWNER_EMAIL); - workflowDef.setWorkflowStatusListenerEnabled(true); - LinkedList wftasks = new LinkedList<>(); - - WorkflowTask wft1 = new WorkflowTask(); - wft1.setName("junit_task_1"); - wft1.setTaskReferenceName("t1"); - - wftasks.add(wft1); - workflowDef.setTasks(wftasks); - - metadataService.updateWorkflowDef(Collections.singletonList(workflowDef)); - - String id = - startOrLoadWorkflowExecution( - workflowDef.getName(), 1, "testWorkflowCompletedListener", new HashMap<>()); - - List tasks = workflowExecutionService.getTasks("junit_task_1", null, 1); - tasks.get(0).setStatus(COMPLETED); - workflowExecutionService.updateTask(new TaskResult(tasks.get(0))); - - checkIfWorkflowIsCompleted(id); - - List callbackMessages = queueDAO.pollMessages(CALLBACK_QUEUE, 1, 200); - queueDAO.ack(CALLBACK_QUEUE, callbackMessages.get(0).getId()); - - WorkflowSummary payload = - objectMapper.readValue(callbackMessages.get(0).getPayload(), WorkflowSummary.class); - assertEquals(id, callbackMessages.get(0).getId()); - assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType()); - assertEquals("testWorkflowCompletedListener", payload.getCorrelationId()); - assertEquals(Workflow.WorkflowStatus.COMPLETED, payload.getStatus()); - - // check finalized queue - callbackMessages = queueDAO.pollMessages(FINALIZED_QUEUE, 1, 200); - queueDAO.ack(CALLBACK_QUEUE, callbackMessages.get(0).getId()); - - payload = - objectMapper.readValue(callbackMessages.get(0).getPayload(), 
WorkflowSummary.class); - assertEquals(id, callbackMessages.get(0).getId()); - assertEquals(LINEAR_WORKFLOW_T1_T2, payload.getWorkflowType()); - assertEquals("testWorkflowCompletedListener", payload.getCorrelationId()); - assertEquals(Workflow.WorkflowStatus.COMPLETED, payload.getStatus()); - } - - @SuppressWarnings("BusyWait") - private void checkIfWorkflowIsCompleted(String id) throws InterruptedException { - int statusRetrieveAttempts = 0; - while (workflowExecutor.getWorkflow(id, false).getStatus() - != WorkflowModel.Status.COMPLETED) { - if (statusRetrieveAttempts > 5) { - break; - } - Thread.sleep(100); - statusRetrieveAttempts++; - } - } - - private String startOrLoadWorkflowExecution( - String workflowName, int version, String correlationId, Map input) { - return workflowExecutor.startWorkflow(workflowName, version, correlationId, input, null); - } -} diff --git a/zookeeper-lock/build.gradle b/zookeeper-lock/build.gradle deleted file mode 100644 index ea42008da8..0000000000 --- a/zookeeper-lock/build.gradle +++ /dev/null @@ -1,9 +0,0 @@ -dependencies { - implementation project(':conductor-core') - compileOnly 'org.springframework.boot:spring-boot-starter' - - implementation "org.apache.commons:commons-lang3" - implementation "org.apache.curator:curator-recipes:${revCuratorRecipes}" - - testImplementation "org.apache.curator:curator-test:${revCuratorTest}" -} diff --git a/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperLockConfiguration.java b/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperLockConfiguration.java deleted file mode 100644 index 048f6c7a92..0000000000 --- a/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperLockConfiguration.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ -package com.netflix.conductor.zookeeper.config; - -import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty; -import org.springframework.boot.context.properties.EnableConfigurationProperties; -import org.springframework.context.annotation.Bean; -import org.springframework.context.annotation.Configuration; - -import com.netflix.conductor.core.sync.Lock; -import com.netflix.conductor.zookeeper.lock.ZookeeperLock; - -@Configuration -@EnableConfigurationProperties(ZookeeperProperties.class) -@ConditionalOnProperty(name = "conductor.workflow-execution-lock.type", havingValue = "zookeeper") -public class ZookeeperLockConfiguration { - - @Bean - public Lock provideLock(ZookeeperProperties properties) { - return new ZookeeperLock(properties); - } -} diff --git a/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperProperties.java b/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperProperties.java deleted file mode 100644 index 8620ac7b12..0000000000 --- a/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperProperties.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2020 Netflix, Inc. - *

- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- */
-package com.netflix.conductor.zookeeper.config;
-
-import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
-import org.springframework.boot.context.properties.EnableConfigurationProperties;
-import org.springframework.context.annotation.Bean;
-import org.springframework.context.annotation.Configuration;
-
-import com.netflix.conductor.core.sync.Lock;
-import com.netflix.conductor.zookeeper.lock.ZookeeperLock;
-
-@Configuration
-@EnableConfigurationProperties(ZookeeperProperties.class)
-@ConditionalOnProperty(name = "conductor.workflow-execution-lock.type", havingValue = "zookeeper")
-public class ZookeeperLockConfiguration {
-
-    @Bean
-    public Lock provideLock(ZookeeperProperties properties) {
-        return new ZookeeperLock(properties);
-    }
-}
diff --git a/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperProperties.java b/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperProperties.java
deleted file mode 100644
index 8620ac7b12..0000000000
--- a/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/config/ZookeeperProperties.java
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright 2020 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- */
-package com.netflix.conductor.zookeeper.config;
-
-import java.time.Duration;
-
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.springframework.boot.context.properties.ConfigurationProperties;
-
-@ConfigurationProperties("conductor.zookeeper-lock")
-public class ZookeeperProperties {
-
-    /** The connection string to be used to connect to the Zookeeper cluster */
-    private String connectionString = "localhost:2181";
-
-    /** The session timeout for the curator */
-    private Duration sessionTimeout =
-            Duration.ofMillis(CuratorFrameworkFactory.builder().getSessionTimeoutMs());
-
-    /** The connection timeout for the curator */
-    private Duration connectionTimeout =
-            Duration.ofMillis(CuratorFrameworkFactory.builder().getConnectionTimeoutMs());
-
-    /** The namespace to use within the zookeeper cluster */
-    private String namespace = "";
-
-    public String getConnectionString() {
-        return connectionString;
-    }
-
-    public void setConnectionString(String connectionString) {
-        this.connectionString = connectionString;
-    }
-
-    public Duration getSessionTimeout() {
-        return sessionTimeout;
-    }
-
-    public void setSessionTimeout(Duration sessionTimeout) {
-        this.sessionTimeout = sessionTimeout;
-    }
-
-    public Duration getConnectionTimeout() {
-        return connectionTimeout;
-    }
-
-    public void setConnectionTimeout(Duration connectionTimeout) {
-        this.connectionTimeout = connectionTimeout;
-    }
-
-    public String getNamespace() {
-        return namespace;
-    }
-
-    public void setNamespace(String namespace) {
-        this.namespace = namespace;
-    }
-}
diff --git a/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/lock/ZookeeperLock.java b/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/lock/ZookeeperLock.java
deleted file mode 100644
index 714822131d..0000000000
--- a/zookeeper-lock/src/main/java/com/netflix/conductor/zookeeper/lock/ZookeeperLock.java
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright 2020 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- */
-package com.netflix.conductor.zookeeper.lock;
-
-import java.util.concurrent.TimeUnit;
-
-import org.apache.commons.lang3.StringUtils;
-import org.apache.curator.RetryPolicy;
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.framework.recipes.locks.InterProcessMutex;
-import org.apache.curator.retry.ExponentialBackoffRetry;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.netflix.conductor.core.sync.Lock;
-import com.netflix.conductor.zookeeper.config.ZookeeperProperties;
-
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.CacheLoader;
-import com.google.common.cache.LoadingCache;
-
-@SuppressWarnings("UnstableApiUsage")
-public class ZookeeperLock implements Lock {
-
-    public static final int CACHE_MAXSIZE = 20000;
-    public static final int CACHE_EXPIRY_TIME = 10;
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(ZookeeperLock.class);
-    private final CuratorFramework client;
-    private final LoadingCache<String, InterProcessMutex> zkLocks;
-    private final String zkPath;
-
-    public ZookeeperLock(ZookeeperProperties properties) {
-        String lockNamespace = properties.getNamespace();
-        RetryPolicy retryPolicy = new ExponentialBackoffRetry(1000, 3);
-        client =
-                CuratorFrameworkFactory.newClient(
-                        properties.getConnectionString(),
-                        (int) properties.getSessionTimeout().toMillis(),
-                        (int) properties.getConnectionTimeout().toMillis(),
-                        retryPolicy);
-        client.start();
-        zkLocks =
-                CacheBuilder.newBuilder()
-                        .maximumSize(CACHE_MAXSIZE)
-                        .expireAfterAccess(CACHE_EXPIRY_TIME, TimeUnit.MINUTES)
-                        .build(
-                                new CacheLoader<String, InterProcessMutex>() {
-                                    @Override
-                                    public InterProcessMutex load(String key) {
-                                        return new InterProcessMutex(client, zkPath.concat(key));
-                                    }
-                                });
-
-        zkPath =
-                StringUtils.isEmpty(lockNamespace)
-                        ? ("/conductor/")
-                        : ("/conductor/" + lockNamespace + "/");
-    }
-
-    public void acquireLock(String lockId) {
-        if (StringUtils.isEmpty(lockId)) {
-            throw new IllegalArgumentException("lockId cannot be NULL or empty: lockId=" + lockId);
-        }
-        try {
-            InterProcessMutex mutex = zkLocks.get(lockId);
-            mutex.acquire();
-        } catch (Exception e) {
-            LOGGER.debug("Failed in acquireLock: ", e);
-        }
-    }
-
-    public boolean acquireLock(String lockId, long timeToTry, TimeUnit unit) {
-        if (StringUtils.isEmpty(lockId)) {
-            throw new IllegalArgumentException("lockId cannot be NULL or empty: lockId=" + lockId);
-        }
-        try {
-            InterProcessMutex mutex = zkLocks.get(lockId);
-            return mutex.acquire(timeToTry, unit);
-        } catch (Exception e) {
-            LOGGER.debug("Failed in acquireLock: ", e);
-        }
-        return false;
-    }
-
-    public boolean acquireLock(String lockId, long timeToTry, long leaseTime, TimeUnit unit) {
-        return acquireLock(lockId, timeToTry, unit);
-    }
-
-    public void releaseLock(String lockId) {
-        if (StringUtils.isEmpty(lockId)) {
-            throw new IllegalArgumentException("lockId cannot be NULL or empty: lockId=" + lockId);
-        }
-        try {
-            InterProcessMutex lock = zkLocks.getIfPresent(lockId);
-            if (lock != null) {
-                lock.release();
-            }
-        } catch (Exception e) {
-            LOGGER.debug("Failed in releaseLock: ", e);
-        }
-    }
-
-    public void deleteLock(String lockId) {
-        try {
-            LOGGER.debug("Deleting lock {}", zkPath.concat(lockId));
-            client.delete().guaranteed().forPath(zkPath.concat(lockId));
-        } catch (Exception e) {
-            LOGGER.debug("Failed to removeLock: ", e);
-        }
-    }
-}
diff --git a/zookeeper-lock/src/test/java/com/netflix/conductor/zookeeper/lock/ZookeeperLockTest.java b/zookeeper-lock/src/test/java/com/netflix/conductor/zookeeper/lock/ZookeeperLockTest.java
deleted file mode 100644
index 9570ca41d1..0000000000
--- a/zookeeper-lock/src/test/java/com/netflix/conductor/zookeeper/lock/ZookeeperLockTest.java
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * Copyright 2020 Netflix, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
- * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations under the License.
- */
-package com.netflix.conductor.zookeeper.lock;
-
-import java.time.Duration;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.test.TestingServer;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.netflix.conductor.core.sync.Lock;
-import com.netflix.conductor.service.ExecutionLockService;
-import com.netflix.conductor.zookeeper.config.ZookeeperProperties;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class ZookeeperLockTest {
-
-    private static final Logger LOGGER = LoggerFactory.getLogger(ZookeeperLockTest.class);
-
-    TestingServer zkServer;
-    ZookeeperProperties properties;
-
-    @Before
-    public void setUp() throws Exception {
-        zkServer = new TestingServer(2181);
-        properties = mock(ZookeeperProperties.class);
-        when(properties.getConnectionString()).thenReturn("localhost:2181");
-        when(properties.getSessionTimeout())
-                .thenReturn(
-                        Duration.ofMillis(CuratorFrameworkFactory.builder().getSessionTimeoutMs()));
-        when(properties.getConnectionTimeout())
-                .thenReturn(
-                        Duration.ofMillis(
-                                CuratorFrameworkFactory.builder().getConnectionTimeoutMs()));
-        when(properties.getNamespace()).thenReturn("");
-    }
-
-    @After
-    public void tearDown() throws Exception {
-        zkServer.stop();
-    }
-
-    @Test
-    public void testLockReentrance() {
-        Lock zkLock = new ZookeeperLock(properties);
-        boolean hasLock = zkLock.acquireLock("reentrantLock1", 50, TimeUnit.MILLISECONDS);
-        assertTrue(hasLock);
-
-        hasLock = zkLock.acquireLock("reentrantLock1", 50, TimeUnit.MILLISECONDS);
-        assertTrue(hasLock);
-        zkLock.releaseLock("reentrantLock1");
-        zkLock.releaseLock("reentrantLock1");
-    }
-
-    @Test
-    public void testZkLock() throws InterruptedException {
-        Lock zkLock = new ZookeeperLock(properties);
-        String lock1 = "lock1";
-        String lock2 = "lock2";
-
-        Worker worker1 = new Worker(zkLock, lock1);
-        worker1.start();
-        worker1.lockNotify.acquire();
-        assertTrue(worker1.isLocked);
-        Thread.sleep(30000);
-
-        Worker worker2 = new Worker(zkLock, lock1);
-        worker2.start();
-        assertTrue(worker2.isAlive());
-        assertFalse(worker2.isLocked);
-        Thread.sleep(30000);
-
-        Worker worker3 = new Worker(zkLock, lock2);
-        worker3.start();
-        worker3.lockNotify.acquire();
-        assertTrue(worker3.isLocked);
-        Thread.sleep(30000);
-
-        worker1.unlockNotify.release();
-        worker1.join();
-
-        Thread.sleep(30000);
-        worker2.lockNotify.acquire();
-        assertTrue(worker2.isLocked);
-        worker2.unlockNotify.release();
-        worker2.join();
-
-        worker3.unlockNotify.release();
-        worker3.join();
-    }
-
-    private static class Worker extends Thread {
-
-        private final Lock lock;
-        private final String lockID;
-        Semaphore unlockNotify = new Semaphore(0);
-        Semaphore lockNotify = new Semaphore(0);
-        boolean isLocked = false;
-
-        Worker(Lock lock, String lockID) {
-            super("TestWorker-" + lockID);
-            this.lock = lock;
-            this.lockID = lockID;
-        }
-
-        @Override
-        public void run() {
-            lock.acquireLock(lockID, 5, TimeUnit.MILLISECONDS);
-            isLocked = true;
-            lockNotify.release();
-            try {
-                unlockNotify.acquire();
-            } catch (Exception e) {
-                e.printStackTrace();
-            } finally {
-                isLocked = false;
-                lock.releaseLock(lockID);
-            }
-        }
-    }
-
-    private static class MultiLockWorker extends Thread {
-
-        private final ExecutionLockService lock;
-        private final Iterable<String> lockIDs;
-        private boolean finishedSuccessfully = false;
-
-        public MultiLockWorker(ExecutionLockService executionLock, Iterable<String> lockIDs) {
-            super();
-            this.lock = executionLock;
-            this.lockIDs = lockIDs;
-        }
-
-        @Override
-        public void run() {
-            try {
-                int iterations = 0;
-                for (String lockID : lockIDs) {
-                    lock.acquireLock(lockID);
-                    Thread.sleep(100);
-                    lock.releaseLock(lockID);
-                    iterations++;
-                    if (iterations % 10 == 0) {
-                        LOGGER.info("Finished iterations: {}", iterations);
-                    }
-                }
-                finishedSuccessfully = true;
-            } catch (InterruptedException e) {
-                e.printStackTrace();
-            }
-        }
-
-        public boolean isFinishedSuccessfully() {
-            return finishedSuccessfully;
-        }
-    }
-}