From 6f31fd97b05193f3bf3ee6e33f282e8b1415ddea Mon Sep 17 00:00:00 2001
From: Yu Xia
Date: Wed, 12 Feb 2020 17:53:28 -0800
Subject: [PATCH] Add operation APIs for history replication DLQ (#3019)

* Integrate CLI with history replication DLQ

* Add history replication DLQ
---
 .gen/go/history/history.go | 3127 ++++++++++++++---
 .../go/history/historyserviceclient/client.go | 87 +
 .../go/history/historyserviceserver/server.go | 107 +-
 .gen/go/history/historyservicetest/client.go | 97 +
 client/history/client.go | 44 +-
 client/history/metricClient.go | 51 +
 client/history/retryableClient.go | 47 +
 common/metrics/defs.go | 28 +-
 common/mocks/ExecutionManager.go | 34 +
 .../cassandra/cassandraPersistence.go | 69 +-
 common/persistence/dataInterfaces.go | 16 +
 common/persistence/executionStore.go | 12 +
 .../persistence-tests/executionManagerTest.go | 49 +
 .../persistence-tests/persistenceTestBase.go | 58 +
 common/persistence/persistenceInterface.go | 2 +
 .../persistence/persistenceMetricClients.go | 32 +
 .../persistenceRateLimitedClients.go | 20 +
 common/persistence/sql/sqlExecutionManager.go | 37 +
 .../persistence/sql/sqlplugin/interfaces.go | 6 +
 .../sql/sqlplugin/mysql/execution.go | 39 +
 .../sql/sqlplugin/postgres/execution.go | 39 +
 idls | 2 +-
 schema/cassandra/cadence/schema.cql | 2 +
 .../cadence/versioned/v0.25/manifest.json | 8 +
 .../versioned/v0.25/replication_dlq.cql | 1 +
 schema/cassandra/version.go | 2 +-
 service/frontend/adminHandler.go | 6 +-
 service/history/handler.go | 67 +-
 service/history/historyEngine.go | 91 +-
 service/history/historyEngine_mock.go | 44 +
 service/history/replicationDLQHandler.go | 216 ++
 service/history/replicationDLQHandler_test.go | 228 ++
 service/history/replicationTaskExecutor.go | 343 ++
 .../history/replicationTaskExecutor_mock.go | 74 +
 .../history/replicationTaskExecutor_test.go | 275 ++
 service/history/replicationTaskProcessor.go | 337 +-
 .../history/replicationTaskProcessor_test.go | 231 +-
 service/history/shardContext.go | 24 +
 38 files changed, 4870 insertions(+), 1082 deletions(-)
 create mode 100644 schema/cassandra/cadence/versioned/v0.25/manifest.json
 create mode 100644 schema/cassandra/cadence/versioned/v0.25/replication_dlq.cql
 create mode 100644 service/history/replicationDLQHandler.go
 create mode 100644 service/history/replicationDLQHandler_test.go
 create mode 100644 service/history/replicationTaskExecutor.go
 create mode 100644 service/history/replicationTaskExecutor_mock.go
 create mode 100644 service/history/replicationTaskExecutor_test.go

diff --git a/.gen/go/history/history.go b/.gen/go/history/history.go
index 65526d563c4..71a5f02da12 100644
--- a/.gen/go/history/history.go
+++ b/.gen/go/history/history.go
@@ -12666,7 +12666,7 @@ var ThriftModule = &thriftreflect.ThriftModule{
 	Name: "history",
 	Package: "github.com/uber/cadence/.gen/go/history",
 	FilePath: "history.thrift",
-	SHA1: "e853c4c2cea233e915d16dfc06296c768153108c",
+	SHA1: "af9122e484fc3dcd9505605dc8fd5a27639d6970",
 	Includes: []*thriftreflect.ThriftModule{
 		replicator.ThriftModule,
 		shared.ThriftModule,
 	},
 	Raw: rawIDL,
 }
-const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of 
the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\ninclude \"replicator.thrift\"\n\nnamespace java com.uber.cadence.history\n\nexception EventAlreadyStartedError {\n 1: required string message\n}\n\nexception ShardOwnershipLostError {\n 10: optional string message\n 20: optional string owner\n}\n\nstruct ParentExecutionInfo {\n 10: optional string domainUUID\n 15: optional string domain\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") initiatedId\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.StartWorkflowExecutionRequest startRequest\n 30: optional ParentExecutionInfo parentExecutionInfo\n 40: optional i32 attempt\n 50: optional i64 (js.type = \"Long\") expirationTimestamp\n 55: optional shared.ContinueAsNewInitiator continueAsNewInitiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 60: optional i32 firstDecisionTaskBackoffSeconds\n}\n\nstruct DescribeMutableStateRequest{\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct DescribeMutableStateResponse{\n 30: optional string mutableStateInCache\n 40: optional string mutableStateInDatabase\n}\n\nstruct GetMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct GetMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n //TODO: isWorkflowRunning is deprecating. 
workflowState is going replace this field\n 100: optional bool isWorkflowRunning\n 110: optional i32 stickyTaskListScheduleToStartTimeout\n 120: optional i32 eventStoreVersion\n 130: optional binary currentBranchToken\n 140: optional map replicationInfo\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n // NOTE: workflowState & workflowCloseState are the same as persistence representation\n 150: optional i32 workflowState\n 160: optional i32 workflowCloseState\n 170: optional shared.VersionHistories versionHistories\n 180: optional bool isStickyTaskListEnabled\n}\n\nstruct PollMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct PollMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n 100: optional i32 stickyTaskListScheduleToStartTimeout\n 110: optional binary currentBranchToken\n 120: optional map replicationInfo\n 130: optional shared.VersionHistories versionHistories\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n // NOTE: workflowState & workflowCloseState are the same as persistence representation\n 140: optional i32 workflowState\n 150: optional i32 workflowCloseState\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskCompletedRequest completeRequest\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional RecordDecisionTaskStartedResponse startedResponse\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskFailedRequest failedRequest\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional string domainUUID\n 20: optional shared.RecordActivityTaskHeartbeatRequest heartbeatRequest\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCompletedRequest completeRequest\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskFailedRequest failedRequest\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCanceledRequest cancelRequest\n}\n\nstruct RefreshWorkflowTasksRequest {\n 10: optional string domainUIID\n 20: optional shared.RefreshWorkflowTasksRequest request\n}\n\nstruct RecordActivityTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: 
optional string requestId // Unique id of each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForActivityTaskRequest pollRequest\n}\n\nstruct RecordActivityTaskStartedResponse {\n 20: optional shared.HistoryEvent scheduledEvent\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") attempt\n 50: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 60: optional binary heartbeatDetails\n 70: optional shared.WorkflowType workflowType\n 80: optional string workflowDomain\n}\n\nstruct RecordDecisionTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForDecisionTaskRequest pollRequest\n}\n\nstruct RecordDecisionTaskStartedResponse {\n 10: optional shared.WorkflowType workflowType\n 20: optional i64 (js.type = \"Long\") previousStartedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") attempt\n 70: optional bool stickyExecutionEnabled\n 80: optional shared.TransientDecisionInfo decisionInfo\n 90: optional shared.TaskList WorkflowExecutionTaskList\n 100: optional i32 eventStoreVersion\n 110: optional binary branchToken\n 120: optional i64 (js.type = \"Long\") scheduledTimestamp\n 130: optional i64 (js.type = \"Long\") startedTimestamp\n 140: optional map queries\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWorkflowExecutionRequest signalRequest\n 30: optional shared.WorkflowExecution externalWorkflowExecution\n 40: optional bool childWorkflowOnly\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest\n}\n\nstruct RemoveSignalMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional string requestId\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.TerminateWorkflowExecutionRequest terminateRequest\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.ResetWorkflowExecutionRequest resetRequest\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.RequestCancelWorkflowExecutionRequest cancelRequest\n 30: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 40: optional shared.WorkflowExecution externalWorkflowExecution\n 50: optional bool childWorkflowOnly\n}\n\nstruct ScheduleDecisionTaskRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional bool isFirstDecision\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.DescribeWorkflowExecutionRequest request\n}\n\n/**\n* RecordChildExecutionCompletedRequest is used for reporting the completion of child execution to parent workflow\n* execution which started it. 
When a child execution is completed it creates this request and calls the\n* RecordChildExecutionCompleted API with the workflowExecution of parent. It also sets the completedExecution of the\n* child as it could potentially be different than the ChildExecutionStartedEvent of parent in the situation when\n* child creates multiple runs through ContinueAsNew before finally completing.\n**/\nstruct RecordChildExecutionCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") initiatedId\n 40: optional shared.WorkflowExecution completedExecution\n 50: optional shared.HistoryEvent completionEvent\n}\n\nstruct ReplicateEventsRequest {\n 10: optional string sourceCluster\n 20: optional string domainUUID\n 30: optional shared.WorkflowExecution workflowExecution\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional bool forceBufferEvents // this attribute is deprecated\n 110: optional i32 eventStoreVersion\n 120: optional i32 newRunEventStoreVersion\n 130: optional bool resetWorkflow\n 140: optional bool newRunNDC\n}\n\nstruct ReplicateRawEventsRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional map replicationInfo\n 40: optional shared.DataBlob history\n 50: optional shared.DataBlob newRunHistory\n 60: optional i32 eventStoreVersion\n 70: optional i32 newRunEventStoreVersion\n}\n\nstruct ReplicateEventsV2Request {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional list versionHistoryItems\n 40: optional shared.DataBlob events\n // new run events does not need version history since there is no prior events\n 60: optional shared.DataBlob newRunEvents\n}\n\nstruct SyncShardStatusRequest {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActivityRequest {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n 140: optional binary lastFailureDetails\n 150: optional shared.VersionHistory versionHistory\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domainUUID\n 20: optional shared.QueryWorkflowRequest request\n}\n\nstruct QueryWorkflowResponse {\n 10: optional shared.QueryWorkflowResponse response\n}\n\nstruct ReapplyEventsRequest {\n 10: optional string domainUUID\n 20: optional shared.ReapplyEventsRequest request\n}\n\n/**\n* HistoryService provides API to start a new long running workflow instance, as well as query and update the history\n* of workflow instances already created.\n**/\nservice HistoryService {\n /**\n * StartWorkflowExecution starts a new long running workflow instance. 
It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n GetMutableStateResponse GetMutableState(1: GetMutableStateRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n PollMutableStateResponse PollMutableState(1: PollMutableStateRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. ClientImpl\n **/\n ResetStickyTaskListResponse ResetStickyTaskList(1: ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordDecisionTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForDecisionTask call. It records in the history the event that the decision task has started. 
It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordDecisionTaskStartedResponse RecordDecisionTaskStarted(1: RecordDecisionTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForActivityTask call. It records in the history the event that the decision task has started. It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordActivityTaskStartedResponse RecordActivityTaskStarted(1: RecordActivityTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n **/\n RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. This API can be used by client to\n * either clear sticky tasklist or report ny panics during DecisionTask processing.\n **/\n void RespondDecisionTaskFailed(1: RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. 
If worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timedout and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it is successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. 
It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to running workflow execution. This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution.\n * If workflow is running, this results in WorkflowExecutionSignaled event recorded in the history\n * and a decision task being created for the execution.\n * If workflow is not running or not found, it will first try start workflow with given WorkflowIDResuePolicy,\n * and record WorkflowExecutionStarted and WorkflowExecutionSignaled event in case of success.\n * It will return `WorkflowExecutionAlreadyStartedError` if start workflow failed with given policy.\n **/\n shared.StartWorkflowExecutionResponse SignalWithStartWorkflowExecution(1: SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: ShardOwnershipLostError shardOwnershipLostError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n )\n\n /**\n * RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. 
This is currently\n * used to clean execution info when signal decision finished.\n **/\n void RemoveSignalMutableState(1: RemoveSignalMutableStateRequest removeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ResetWorkflowExecution reset an existing workflow execution by a firstEventID of a existing event batch\n * in the history and immediately terminating the current execution instance.\n * After reset, the history will grow from nextFirstEventID.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ScheduleDecisionTask is used for creating a decision task for already started workflow execution. 
This is mainly\n * used by transfer queue processor during the processing of StartChildWorkflowExecution task, where it first starts\n * child execution without creating the decision task and then calls this API after updating the mutable state of\n * parent execution.\n **/\n void ScheduleDecisionTask(1: ScheduleDecisionTaskRequest scheduleRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordChildExecutionCompleted is used for reporting the completion of child workflow execution to parent.\n * This is mainly called by transfer queue processor during the processing of DeleteExecution task.\n **/\n void RecordChildExecutionCompleted(1: RecordChildExecutionCompletedRequest completionRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEvents(1: ReplicateEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateRawEvents(1: ReplicateRawEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEventsV2(1: ReplicateEventsV2Request replicateV2Request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskV2Error retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncShardStatus sync the status between shards\n **/\n void SyncShardStatus(1: SyncShardStatusRequest syncShardStatusRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 4: 
ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncActivity sync the activity status\n **/\n void SyncActivity(1: SyncActivityRequest syncActivityRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.RetryTaskV2Error retryTaskV2Error,\n )\n\n /**\n * DescribeMutableState returns information about the internal states of workflow mutable state.\n **/\n DescribeMutableStateResponse DescribeMutableState(1: DescribeMutableStateRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.AccessDeniedError accessDeniedError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * DescribeHistoryHost returns information about the internal states of a history host\n **/\n shared.DescribeHistoryHostResponse DescribeHistoryHost(1: shared.DescribeHistoryHostRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * CloseShard close the shard\n **/\n void CloseShard(1: shared.CloseShardRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RemoveTask remove task based on type, taskid, shardid\n **/\n void RemoveTask(1: shared.RemoveTaskRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetReplicationMessages return replication messages based on the read level\n **/\n replicator.GetReplicationMessagesResponse GetReplicationMessages(1: replicator.GetReplicationMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * GetDLQReplicationMessages return replication messages based on dlq info\n **/\n replicator.GetDLQReplicationMessagesResponse GetDLQReplicationMessages(1: replicator.GetDLQReplicationMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.EntityNotExistsError entityNotExistError,\n )\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n QueryWorkflowResponse QueryWorkflow(1: QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 2: shared.InternalServiceError internalServiceError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n\t 6: shared.ServiceBusyError serviceBusyError,\n\t 7: shared.ClientVersionNotSupportedError 
clientVersionNotSupportedError,\n\t)\n\n /**\n * ReapplyEvents applies stale events to the current workflow and current run\n **/\n void ReapplyEvents(1: ReapplyEventsRequest reapplyEventsRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: ShardOwnershipLostError shardOwnershipLostError,\n 7: shared.EntityNotExistsError entityNotExistError,\n )\n\n /**\n * RefreshWorkflowTasks refreshes all tasks of a workflow\n **/\n void RefreshWorkflowTasks(1: RefreshWorkflowTasksRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.EntityNotExistsError entityNotExistError,\n )\n}\n" +const rawIDL = "// Copyright (c) 2017 Uber Technologies, Inc.\n//\n// Permission is hereby granted, free of charge, to any person obtaining a copy\n// of this software and associated documentation files (the \"Software\"), to deal\n// in the Software without restriction, including without limitation the rights\n// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n// copies of the Software, and to permit persons to whom the Software is\n// furnished to do so, subject to the following conditions:\n//\n// The above copyright notice and this permission notice shall be included in\n// all copies or substantial portions of the Software.\n//\n// THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n// THE SOFTWARE.\n\ninclude \"shared.thrift\"\ninclude \"replicator.thrift\"\n\nnamespace java com.uber.cadence.history\n\nexception EventAlreadyStartedError {\n 1: required string message\n}\n\nexception ShardOwnershipLostError {\n 10: optional string message\n 20: optional string owner\n}\n\nstruct ParentExecutionInfo {\n 10: optional string domainUUID\n 15: optional string domain\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") initiatedId\n}\n\nstruct StartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.StartWorkflowExecutionRequest startRequest\n 30: optional ParentExecutionInfo parentExecutionInfo\n 40: optional i32 attempt\n 50: optional i64 (js.type = \"Long\") expirationTimestamp\n 55: optional shared.ContinueAsNewInitiator continueAsNewInitiator\n 56: optional string continuedFailureReason\n 57: optional binary continuedFailureDetails\n 58: optional binary lastCompletionResult\n 60: optional i32 firstDecisionTaskBackoffSeconds\n}\n\nstruct DescribeMutableStateRequest{\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct DescribeMutableStateResponse{\n 30: optional string mutableStateInCache\n 40: optional string mutableStateInDatabase\n}\n\nstruct GetMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct GetMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n //TODO: isWorkflowRunning is deprecating. 
workflowState is going replace this field\n 100: optional bool isWorkflowRunning\n 110: optional i32 stickyTaskListScheduleToStartTimeout\n 120: optional i32 eventStoreVersion\n 130: optional binary currentBranchToken\n 140: optional map replicationInfo\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n // NOTE: workflowState & workflowCloseState are the same as persistence representation\n 150: optional i32 workflowState\n 160: optional i32 workflowCloseState\n 170: optional shared.VersionHistories versionHistories\n 180: optional bool isStickyTaskListEnabled\n}\n\nstruct PollMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n 30: optional i64 (js.type = \"Long\") expectedNextEventId\n 40: optional binary currentBranchToken\n}\n\nstruct PollMutableStateResponse {\n 10: optional shared.WorkflowExecution execution\n 20: optional shared.WorkflowType workflowType\n 30: optional i64 (js.type = \"Long\") NextEventId\n 35: optional i64 (js.type = \"Long\") PreviousStartedEventId\n 40: optional i64 (js.type = \"Long\") LastFirstEventId\n 50: optional shared.TaskList taskList\n 60: optional shared.TaskList stickyTaskList\n 70: optional string clientLibraryVersion\n 80: optional string clientFeatureVersion\n 90: optional string clientImpl\n 100: optional i32 stickyTaskListScheduleToStartTimeout\n 110: optional binary currentBranchToken\n 120: optional map replicationInfo\n 130: optional shared.VersionHistories versionHistories\n // TODO: when migrating to gRPC, make this a enum\n // TODO: when migrating to gRPC, unify internal & external representation\n // NOTE: workflowState & workflowCloseState are the same as persistence representation\n 140: optional i32 workflowState\n 150: optional i32 workflowCloseState\n}\n\nstruct ResetStickyTaskListRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution execution\n}\n\nstruct ResetStickyTaskListResponse {\n // The reason to keep this response is to allow returning\n // information in the future.\n}\n\nstruct RespondDecisionTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskCompletedRequest completeRequest\n}\n\nstruct RespondDecisionTaskCompletedResponse {\n 10: optional RecordDecisionTaskStartedResponse startedResponse\n}\n\nstruct RespondDecisionTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondDecisionTaskFailedRequest failedRequest\n}\n\nstruct RecordActivityTaskHeartbeatRequest {\n 10: optional string domainUUID\n 20: optional shared.RecordActivityTaskHeartbeatRequest heartbeatRequest\n}\n\nstruct RespondActivityTaskCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCompletedRequest completeRequest\n}\n\nstruct RespondActivityTaskFailedRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskFailedRequest failedRequest\n}\n\nstruct RespondActivityTaskCanceledRequest {\n 10: optional string domainUUID\n 20: optional shared.RespondActivityTaskCanceledRequest cancelRequest\n}\n\nstruct RefreshWorkflowTasksRequest {\n 10: optional string domainUIID\n 20: optional shared.RefreshWorkflowTasksRequest request\n}\n\nstruct RecordActivityTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: 
optional string requestId // Unique id of each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForActivityTaskRequest pollRequest\n}\n\nstruct RecordActivityTaskStartedResponse {\n 20: optional shared.HistoryEvent scheduledEvent\n 30: optional i64 (js.type = \"Long\") startedTimestamp\n 40: optional i64 (js.type = \"Long\") attempt\n 50: optional i64 (js.type = \"Long\") scheduledTimestampOfThisAttempt\n 60: optional binary heartbeatDetails\n 70: optional shared.WorkflowType workflowType\n 80: optional string workflowDomain\n}\n\nstruct RecordDecisionTaskStartedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") scheduleId\n 40: optional i64 (js.type = \"Long\") taskId\n 45: optional string requestId // Unique id of each poll request. Used to ensure at most once delivery of tasks.\n 50: optional shared.PollForDecisionTaskRequest pollRequest\n}\n\nstruct RecordDecisionTaskStartedResponse {\n 10: optional shared.WorkflowType workflowType\n 20: optional i64 (js.type = \"Long\") previousStartedEventId\n 30: optional i64 (js.type = \"Long\") scheduledEventId\n 40: optional i64 (js.type = \"Long\") startedEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") attempt\n 70: optional bool stickyExecutionEnabled\n 80: optional shared.TransientDecisionInfo decisionInfo\n 90: optional shared.TaskList WorkflowExecutionTaskList\n 100: optional i32 eventStoreVersion\n 110: optional binary branchToken\n 120: optional i64 (js.type = \"Long\") scheduledTimestamp\n 130: optional i64 (js.type = \"Long\") startedTimestamp\n 140: optional map queries\n}\n\nstruct SignalWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWorkflowExecutionRequest signalRequest\n 30: optional shared.WorkflowExecution externalWorkflowExecution\n 40: optional bool childWorkflowOnly\n}\n\nstruct SignalWithStartWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.SignalWithStartWorkflowExecutionRequest signalWithStartRequest\n}\n\nstruct RemoveSignalMutableStateRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional string requestId\n}\n\nstruct TerminateWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.TerminateWorkflowExecutionRequest terminateRequest\n}\n\nstruct ResetWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.ResetWorkflowExecutionRequest resetRequest\n}\n\nstruct RequestCancelWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.RequestCancelWorkflowExecutionRequest cancelRequest\n 30: optional i64 (js.type = \"Long\") externalInitiatedEventId\n 40: optional shared.WorkflowExecution externalWorkflowExecution\n 50: optional bool childWorkflowOnly\n}\n\nstruct ScheduleDecisionTaskRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional bool isFirstDecision\n}\n\nstruct DescribeWorkflowExecutionRequest {\n 10: optional string domainUUID\n 20: optional shared.DescribeWorkflowExecutionRequest request\n}\n\n/**\n* RecordChildExecutionCompletedRequest is used for reporting the completion of child execution to parent workflow\n* execution which started it. 
When a child execution is completed it creates this request and calls the\n* RecordChildExecutionCompleted API with the workflowExecution of parent. It also sets the completedExecution of the\n* child as it could potentially be different than the ChildExecutionStartedEvent of parent in the situation when\n* child creates multiple runs through ContinueAsNew before finally completing.\n**/\nstruct RecordChildExecutionCompletedRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional i64 (js.type = \"Long\") initiatedId\n 40: optional shared.WorkflowExecution completedExecution\n 50: optional shared.HistoryEvent completionEvent\n}\n\nstruct ReplicateEventsRequest {\n 10: optional string sourceCluster\n 20: optional string domainUUID\n 30: optional shared.WorkflowExecution workflowExecution\n 40: optional i64 (js.type = \"Long\") firstEventId\n 50: optional i64 (js.type = \"Long\") nextEventId\n 60: optional i64 (js.type = \"Long\") version\n 70: optional map replicationInfo\n 80: optional shared.History history\n 90: optional shared.History newRunHistory\n 100: optional bool forceBufferEvents // this attribute is deprecated\n 110: optional i32 eventStoreVersion\n 120: optional i32 newRunEventStoreVersion\n 130: optional bool resetWorkflow\n 140: optional bool newRunNDC\n}\n\nstruct ReplicateRawEventsRequest {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional map replicationInfo\n 40: optional shared.DataBlob history\n 50: optional shared.DataBlob newRunHistory\n 60: optional i32 eventStoreVersion\n 70: optional i32 newRunEventStoreVersion\n}\n\nstruct ReplicateEventsV2Request {\n 10: optional string domainUUID\n 20: optional shared.WorkflowExecution workflowExecution\n 30: optional list versionHistoryItems\n 40: optional shared.DataBlob events\n // new run events does not need version history since there is no prior events\n 60: optional shared.DataBlob newRunEvents\n}\n\nstruct SyncShardStatusRequest {\n 10: optional string sourceCluster\n 20: optional i64 (js.type = \"Long\") shardId\n 30: optional i64 (js.type = \"Long\") timestamp\n}\n\nstruct SyncActivityRequest {\n 10: optional string domainId\n 20: optional string workflowId\n 30: optional string runId\n 40: optional i64 (js.type = \"Long\") version\n 50: optional i64 (js.type = \"Long\") scheduledId\n 60: optional i64 (js.type = \"Long\") scheduledTime\n 70: optional i64 (js.type = \"Long\") startedId\n 80: optional i64 (js.type = \"Long\") startedTime\n 90: optional i64 (js.type = \"Long\") lastHeartbeatTime\n 100: optional binary details\n 110: optional i32 attempt\n 120: optional string lastFailureReason\n 130: optional string lastWorkerIdentity\n 140: optional binary lastFailureDetails\n 150: optional shared.VersionHistory versionHistory\n}\n\nstruct QueryWorkflowRequest {\n 10: optional string domainUUID\n 20: optional shared.QueryWorkflowRequest request\n}\n\nstruct QueryWorkflowResponse {\n 10: optional shared.QueryWorkflowResponse response\n}\n\nstruct ReapplyEventsRequest {\n 10: optional string domainUUID\n 20: optional shared.ReapplyEventsRequest request\n}\n\n/**\n* HistoryService provides API to start a new long running workflow instance, as well as query and update the history\n* of workflow instances already created.\n**/\nservice HistoryService {\n /**\n * StartWorkflowExecution starts a new long running workflow instance. 
It will create the instance with\n * 'WorkflowExecutionStarted' event in history and also schedule the first DecisionTask for the worker to make the\n * first decision for this instance. It will return 'WorkflowExecutionAlreadyStartedError', if an instance already\n * exists with same workflowId.\n **/\n shared.StartWorkflowExecutionResponse StartWorkflowExecution(1: StartWorkflowExecutionRequest startRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.WorkflowExecutionAlreadyStartedError sessionAlreadyExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n GetMutableStateResponse GetMutableState(1: GetMutableStateRequest getRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Returns the information from mutable state of workflow execution.\n * It fails with 'EntityNotExistError' if specified workflow execution in unknown to the service.\n * It returns CurrentBranchChangedError if the workflow version branch has changed.\n **/\n PollMutableStateResponse PollMutableState(1: PollMutableStateRequest pollRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.CurrentBranchChangedError currentBranchChangedError,\n )\n\n /**\n * Reset the sticky tasklist related information in mutable state of a given workflow.\n * Things cleared are:\n * 1. StickyTaskList\n * 2. StickyScheduleToStartTimeout\n * 3. ClientLibraryVersion\n * 4. ClientFeatureVersion\n * 5. ClientImpl\n **/\n ResetStickyTaskListResponse ResetStickyTaskList(1: ResetStickyTaskListRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordDecisionTaskStarted is called by the Matchingservice before it hands a decision task to the application worker in response to\n * a PollForDecisionTask call. It records in the history the event that the decision task has started. 
It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordDecisionTaskStartedResponse RecordDecisionTaskStarted(1: RecordDecisionTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskStarted is called by the Matching service before it hands an activity task to the application worker in response to\n * a PollForActivityTask call. It records in the history the event that the activity task has started. It will return 'EventAlreadyStartedError',\n * if the workflow's execution history already includes a record of the event starting.\n **/\n RecordActivityTaskStartedResponse RecordActivityTaskStarted(1: RecordActivityTaskStartedRequest addRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: EventAlreadyStartedError eventAlreadyStartedError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskCompleted is called by application worker to complete a DecisionTask handed as a result of\n * 'PollForDecisionTask' API call. Completing a DecisionTask will result in new events for the workflow execution and\n * potentially new ActivityTask being created for corresponding decisions. It will also create a DecisionTaskCompleted\n * event in the history for that session. Use the 'taskToken' provided as response of PollForDecisionTask API call\n * for completing the DecisionTask.\n **/\n RespondDecisionTaskCompletedResponse RespondDecisionTaskCompleted(1: RespondDecisionTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondDecisionTaskFailed is called by application worker to indicate failure. This results in\n * DecisionTaskFailedEvent written to the history and a new DecisionTask created. This API can be used by the client to\n * either clear the sticky tasklist or report any panics during DecisionTask processing.\n **/\n void RespondDecisionTaskFailed(1: RespondDecisionTaskFailedRequest failedRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordActivityTaskHeartbeat is called by application worker while it is processing an ActivityTask. 
If the worker fails\n * to heartbeat within 'heartbeatTimeoutSeconds' interval for the ActivityTask, then it will be marked as timed out and\n * 'ActivityTaskTimedOut' event will be written to the workflow history. Calling 'RecordActivityTaskHeartbeat' will\n * fail with 'EntityNotExistsError' in such situations. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for heartbeating.\n **/\n shared.RecordActivityTaskHeartbeatResponse RecordActivityTaskHeartbeat(1: RecordActivityTaskHeartbeatRequest heartbeatRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCompleted is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskCompleted' event being written to the workflow history and a new DecisionTask\n * created for the workflow so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCompleted(1: RespondActivityTaskCompletedRequest completeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskFailed is called by application worker when it is done processing an ActivityTask. It will\n * result in a new 'ActivityTaskFailed' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskFailed(1: RespondActivityTaskFailedRequest failRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RespondActivityTaskCanceled is called by application worker when it has successfully canceled an ActivityTask. It will\n * result in a new 'ActivityTaskCanceled' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. Use the 'taskToken' provided as response of\n * PollForActivityTask API call for completion. 
It fails with 'EntityNotExistsError' if the taskToken is not valid\n * anymore due to activity timeout.\n **/\n void RespondActivityTaskCanceled(1: RespondActivityTaskCanceledRequest canceledRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SignalWorkflowExecution is used to send a signal event to a running workflow execution. This results in\n * WorkflowExecutionSignaled event recorded in the history and a decision task being created for the execution.\n **/\n void SignalWorkflowExecution(1: SignalWorkflowExecutionRequest signalRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * SignalWithStartWorkflowExecution is used to ensure sending a signal event to a workflow execution.\n * If the workflow is running, this results in WorkflowExecutionSignaled event recorded in the history\n * and a decision task being created for the execution.\n * If the workflow is not running or not found, it will first try to start the workflow with the given WorkflowIDReusePolicy,\n * and record WorkflowExecutionStarted and WorkflowExecutionSignaled events in case of success.\n * It will return `WorkflowExecutionAlreadyStartedError` if starting the workflow failed with the given policy.\n **/\n shared.StartWorkflowExecutionResponse SignalWithStartWorkflowExecution(1: SignalWithStartWorkflowExecutionRequest signalWithStartRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: ShardOwnershipLostError shardOwnershipLostError,\n 4: shared.DomainNotActiveError domainNotActiveError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n 7: shared.WorkflowExecutionAlreadyStartedError workflowAlreadyStartedError,\n )\n\n /**\n * RemoveSignalMutableState is used to remove a signal request ID that was previously recorded. 
This is currently\n * used to clean execution info when the signal decision has finished.\n **/\n void RemoveSignalMutableState(1: RemoveSignalMutableStateRequest removeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * TerminateWorkflowExecution terminates an existing workflow execution by recording WorkflowExecutionTerminated event\n * in the history and immediately terminating the execution instance.\n **/\n void TerminateWorkflowExecution(1: TerminateWorkflowExecutionRequest terminateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ResetWorkflowExecution resets an existing workflow execution by the firstEventID of an existing event batch\n * in the history and immediately terminates the current execution instance.\n * After reset, the history will grow from nextFirstEventID.\n **/\n shared.ResetWorkflowExecutionResponse ResetWorkflowExecution(1: ResetWorkflowExecutionRequest resetRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RequestCancelWorkflowExecution is called by application worker when it wants to request cancellation of a workflow instance.\n * It will result in a new 'WorkflowExecutionCancelRequested' event being written to the workflow history and a new DecisionTask\n * created for the workflow instance so new decisions could be made. It fails with 'EntityNotExistsError' if the workflow is not valid\n * anymore due to completion or doesn't exist.\n **/\n void RequestCancelWorkflowExecution(1: RequestCancelWorkflowExecutionRequest cancelRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.CancellationAlreadyRequestedError cancellationAlreadyRequestedError,\n 6: shared.DomainNotActiveError domainNotActiveError,\n 7: shared.LimitExceededError limitExceededError,\n 8: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * ScheduleDecisionTask is used for creating a decision task for an already started workflow execution. 
This is mainly\n * used by transfer queue processor during the processing of StartChildWorkflowExecution task, where it first starts\n * child execution without creating the decision task and then calls this API after updating the mutable state of\n * parent execution.\n **/\n void ScheduleDecisionTask(1: ScheduleDecisionTaskRequest scheduleRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * RecordChildExecutionCompleted is used for reporting the completion of child workflow execution to parent.\n * This is mainly called by transfer queue processor during the processing of DeleteExecution task.\n **/\n void RecordChildExecutionCompleted(1: RecordChildExecutionCompletedRequest completionRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.DomainNotActiveError domainNotActiveError,\n 6: shared.LimitExceededError limitExceededError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * DescribeWorkflowExecution returns information about the specified workflow execution.\n **/\n shared.DescribeWorkflowExecutionResponse DescribeWorkflowExecution(1: DescribeWorkflowExecutionRequest describeRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEvents(1: ReplicateEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateRawEvents(1: ReplicateRawEventsRequest replicateRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n void ReplicateEventsV2(1: ReplicateEventsV2Request replicateV2Request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.RetryTaskV2Error retryTaskError,\n 7: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncShardStatus sync the status between shards\n **/\n void SyncShardStatus(1: SyncShardStatusRequest syncShardStatusRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 4: 
ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.LimitExceededError limitExceededError,\n 6: shared.ServiceBusyError serviceBusyError,\n )\n\n /**\n * SyncActivity sync the activity status\n **/\n void SyncActivity(1: SyncActivityRequest syncActivityRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.RetryTaskError retryTaskError,\n 7: shared.RetryTaskV2Error retryTaskV2Error,\n )\n\n /**\n * DescribeMutableState returns information about the internal states of workflow mutable state.\n **/\n DescribeMutableStateResponse DescribeMutableState(1: DescribeMutableStateRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.EntityNotExistsError entityNotExistError,\n 4: shared.AccessDeniedError accessDeniedError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n 6: shared.LimitExceededError limitExceededError,\n )\n\n /**\n * DescribeHistoryHost returns information about the internal states of a history host\n **/\n shared.DescribeHistoryHostResponse DescribeHistoryHost(1: shared.DescribeHistoryHostRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * CloseShard close the shard\n **/\n void CloseShard(1: shared.CloseShardRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * RemoveTask remove task based on type, taskid, shardid\n **/\n void RemoveTask(1: shared.RemoveTaskRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.AccessDeniedError accessDeniedError,\n )\n\n /**\n * GetReplicationMessages return replication messages based on the read level\n **/\n replicator.GetReplicationMessagesResponse GetReplicationMessages(1: replicator.GetReplicationMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.LimitExceededError limitExceededError,\n 4: shared.ServiceBusyError serviceBusyError,\n 5: shared.ClientVersionNotSupportedError clientVersionNotSupportedError,\n )\n\n /**\n * GetDLQReplicationMessages return replication messages based on dlq info\n **/\n replicator.GetDLQReplicationMessagesResponse GetDLQReplicationMessages(1: replicator.GetDLQReplicationMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.EntityNotExistsError entityNotExistError,\n )\n\n /**\n * QueryWorkflow returns query result for a specified workflow execution\n **/\n QueryWorkflowResponse QueryWorkflow(1: QueryWorkflowRequest queryRequest)\n\tthrows (\n\t 1: shared.BadRequestError badRequestError,\n\t 2: shared.InternalServiceError internalServiceError,\n\t 3: shared.EntityNotExistsError entityNotExistError,\n\t 4: shared.QueryFailedError queryFailedError,\n\t 5: shared.LimitExceededError limitExceededError,\n\t 6: shared.ServiceBusyError serviceBusyError,\n\t 7: shared.ClientVersionNotSupportedError 
clientVersionNotSupportedError,\n\t)\n\n /**\n * ReapplyEvents applies stale events to the current workflow and current run\n **/\n void ReapplyEvents(1: ReapplyEventsRequest reapplyEventsRequest)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: shared.LimitExceededError limitExceededError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: ShardOwnershipLostError shardOwnershipLostError,\n 7: shared.EntityNotExistsError entityNotExistError,\n )\n\n /**\n * RefreshWorkflowTasks refreshes all tasks of a workflow\n **/\n void RefreshWorkflowTasks(1: RefreshWorkflowTasksRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.DomainNotActiveError domainNotActiveError,\n 4: ShardOwnershipLostError shardOwnershipLostError,\n 5: shared.ServiceBusyError serviceBusyError,\n 6: shared.EntityNotExistsError entityNotExistError,\n )\n\n /**\n * ReadDLQMessages returns messages from DLQ\n **/\n replicator.ReadDLQMessagesResponse ReadDLQMessages(1: replicator.ReadDLQMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n )\n\n /**\n * PurgeDLQMessages purges messages from DLQ\n **/\n void PurgeDLQMessages(1: replicator.PurgeDLQMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n )\n\n /**\n * MergeDLQMessages merges messages from DLQ\n **/\n replicator.MergeDLQMessagesResponse MergeDLQMessages(1: replicator.MergeDLQMessagesRequest request)\n throws (\n 1: shared.BadRequestError badRequestError,\n 2: shared.InternalServiceError internalServiceError,\n 3: shared.ServiceBusyError serviceBusyError,\n 4: shared.EntityNotExistsError entityNotExistError,\n 5: ShardOwnershipLostError shardOwnershipLostError,\n )\n}\n" // HistoryService_CloseShard_Args represents the arguments for the HistoryService.CloseShard function. // @@ -17507,14 +17507,14 @@ func (v *HistoryService_GetReplicationMessages_Result) EnvelopeType() wire.Envel return wire.Reply } -// HistoryService_PollMutableState_Args represents the arguments for the HistoryService.PollMutableState function. +// HistoryService_MergeDLQMessages_Args represents the arguments for the HistoryService.MergeDLQMessages function. // -// The arguments for PollMutableState are sent and received over the wire as this struct. -type HistoryService_PollMutableState_Args struct { - PollRequest *PollMutableStateRequest `json:"pollRequest,omitempty"` +// The arguments for MergeDLQMessages are sent and received over the wire as this struct. +type HistoryService_MergeDLQMessages_Args struct { + Request *replicator.MergeDLQMessagesRequest `json:"request,omitempty"` } -// ToWire translates a HistoryService_PollMutableState_Args struct into a Thrift-level intermediate +// ToWire translates a HistoryService_MergeDLQMessages_Args struct into a Thrift-level intermediate // representation. This intermediate representation may be serialized // into bytes using a ThriftRW protocol implementation. 
// @@ -17529,7 +17529,7 @@ type HistoryService_PollMutableState_Args struct { // if err := binaryProtocol.Encode(x, writer); err != nil { // return err // } -func (v *HistoryService_PollMutableState_Args) ToWire() (wire.Value, error) { +func (v *HistoryService_MergeDLQMessages_Args) ToWire() (wire.Value, error) { var ( fields [1]wire.Field i int = 0 @@ -17537,8 +17537,8 @@ func (v *HistoryService_PollMutableState_Args) ToWire() (wire.Value, error) { err error ) - if v.PollRequest != nil { - w, err = v.PollRequest.ToWire() + if v.Request != nil { + w, err = v.Request.ToWire() if err != nil { return w, err } @@ -17549,17 +17549,17 @@ func (v *HistoryService_PollMutableState_Args) ToWire() (wire.Value, error) { return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } -func _PollMutableStateRequest_Read(w wire.Value) (*PollMutableStateRequest, error) { - var v PollMutableStateRequest +func _MergeDLQMessagesRequest_Read(w wire.Value) (*replicator.MergeDLQMessagesRequest, error) { + var v replicator.MergeDLQMessagesRequest err := v.FromWire(w) return &v, err } -// FromWire deserializes a HistoryService_PollMutableState_Args struct from its Thrift-level +// FromWire deserializes a HistoryService_MergeDLQMessages_Args struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. // -// An error is returned if we were unable to build a HistoryService_PollMutableState_Args struct +// An error is returned if we were unable to build a HistoryService_MergeDLQMessages_Args struct // from the provided intermediate representation. // // x, err := binaryProtocol.Decode(reader, wire.TStruct) @@ -17567,19 +17567,19 @@ func _PollMutableStateRequest_Read(w wire.Value) (*PollMutableStateRequest, erro // return nil, err // } // -// var v HistoryService_PollMutableState_Args +// var v HistoryService_MergeDLQMessages_Args // if err := v.FromWire(x); err != nil { // return nil, err // } // return &v, nil -func (v *HistoryService_PollMutableState_Args) FromWire(w wire.Value) error { +func (v *HistoryService_MergeDLQMessages_Args) FromWire(w wire.Value) error { var err error for _, field := range w.GetStruct().Fields { switch field.ID { case 1: if field.Value.Type() == wire.TStruct { - v.PollRequest, err = _PollMutableStateRequest_Read(field.Value) + v.Request, err = _MergeDLQMessagesRequest_Read(field.Value) if err != nil { return err } @@ -17591,34 +17591,34 @@ func (v *HistoryService_PollMutableState_Args) FromWire(w wire.Value) error { return nil } -// String returns a readable string representation of a HistoryService_PollMutableState_Args +// String returns a readable string representation of a HistoryService_MergeDLQMessages_Args // struct. -func (v *HistoryService_PollMutableState_Args) String() string { +func (v *HistoryService_MergeDLQMessages_Args) String() string { if v == nil { return "" } var fields [1]string i := 0 - if v.PollRequest != nil { - fields[i] = fmt.Sprintf("PollRequest: %v", v.PollRequest) + if v.Request != nil { + fields[i] = fmt.Sprintf("Request: %v", v.Request) i++ } - return fmt.Sprintf("HistoryService_PollMutableState_Args{%v}", strings.Join(fields[:i], ", ")) + return fmt.Sprintf("HistoryService_MergeDLQMessages_Args{%v}", strings.Join(fields[:i], ", ")) } -// Equals returns true if all the fields of this HistoryService_PollMutableState_Args match the -// provided HistoryService_PollMutableState_Args. 
+// Equals returns true if all the fields of this HistoryService_MergeDLQMessages_Args match the +// provided HistoryService_MergeDLQMessages_Args. // // This function performs a deep comparison. -func (v *HistoryService_PollMutableState_Args) Equals(rhs *HistoryService_PollMutableState_Args) bool { +func (v *HistoryService_MergeDLQMessages_Args) Equals(rhs *HistoryService_MergeDLQMessages_Args) bool { if v == nil { return rhs == nil } else if rhs == nil { return false } - if !((v.PollRequest == nil && rhs.PollRequest == nil) || (v.PollRequest != nil && rhs.PollRequest != nil && v.PollRequest.Equals(rhs.PollRequest))) { + if !((v.Request == nil && rhs.Request == nil) || (v.Request != nil && rhs.Request != nil && v.Request.Equals(rhs.Request))) { return false } @@ -17626,168 +17626,154 @@ func (v *HistoryService_PollMutableState_Args) Equals(rhs *HistoryService_PollMu } // MarshalLogObject implements zapcore.ObjectMarshaler, enabling -// fast logging of HistoryService_PollMutableState_Args. -func (v *HistoryService_PollMutableState_Args) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { +// fast logging of HistoryService_MergeDLQMessages_Args. +func (v *HistoryService_MergeDLQMessages_Args) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { if v == nil { return nil } - if v.PollRequest != nil { - err = multierr.Append(err, enc.AddObject("pollRequest", v.PollRequest)) + if v.Request != nil { + err = multierr.Append(err, enc.AddObject("request", v.Request)) } return err } -// GetPollRequest returns the value of PollRequest if it is set or its +// GetRequest returns the value of Request if it is set or its // zero value if it is unset. -func (v *HistoryService_PollMutableState_Args) GetPollRequest() (o *PollMutableStateRequest) { - if v != nil && v.PollRequest != nil { - return v.PollRequest +func (v *HistoryService_MergeDLQMessages_Args) GetRequest() (o *replicator.MergeDLQMessagesRequest) { + if v != nil && v.Request != nil { + return v.Request } return } -// IsSetPollRequest returns true if PollRequest is not nil. -func (v *HistoryService_PollMutableState_Args) IsSetPollRequest() bool { - return v != nil && v.PollRequest != nil +// IsSetRequest returns true if Request is not nil. +func (v *HistoryService_MergeDLQMessages_Args) IsSetRequest() bool { + return v != nil && v.Request != nil } // MethodName returns the name of the Thrift function as specified in // the IDL, for which this struct represent the arguments. // -// This will always be "PollMutableState" for this struct. -func (v *HistoryService_PollMutableState_Args) MethodName() string { - return "PollMutableState" +// This will always be "MergeDLQMessages" for this struct. +func (v *HistoryService_MergeDLQMessages_Args) MethodName() string { + return "MergeDLQMessages" } // EnvelopeType returns the kind of value inside this struct. // // This will always be Call for this struct. -func (v *HistoryService_PollMutableState_Args) EnvelopeType() wire.EnvelopeType { +func (v *HistoryService_MergeDLQMessages_Args) EnvelopeType() wire.EnvelopeType { return wire.Call } -// HistoryService_PollMutableState_Helper provides functions that aid in handling the -// parameters and return values of the HistoryService.PollMutableState +// HistoryService_MergeDLQMessages_Helper provides functions that aid in handling the +// parameters and return values of the HistoryService.MergeDLQMessages // function. 
-var HistoryService_PollMutableState_Helper = struct { - // Args accepts the parameters of PollMutableState in-order and returns +var HistoryService_MergeDLQMessages_Helper = struct { + // Args accepts the parameters of MergeDLQMessages in-order and returns // the arguments struct for the function. Args func( - pollRequest *PollMutableStateRequest, - ) *HistoryService_PollMutableState_Args + request *replicator.MergeDLQMessagesRequest, + ) *HistoryService_MergeDLQMessages_Args // IsException returns true if the given error can be thrown - // by PollMutableState. + // by MergeDLQMessages. // - // An error can be thrown by PollMutableState only if the + // An error can be thrown by MergeDLQMessages only if the // corresponding exception type was mentioned in the 'throws' // section for it in the Thrift file. IsException func(error) bool - // WrapResponse returns the result struct for PollMutableState + // WrapResponse returns the result struct for MergeDLQMessages // given its return value and error. // // This allows mapping values and errors returned by - // PollMutableState into a serializable result struct. + // MergeDLQMessages into a serializable result struct. // WrapResponse returns a non-nil error if the provided - // error cannot be thrown by PollMutableState + // error cannot be thrown by MergeDLQMessages // - // value, err := PollMutableState(args) - // result, err := HistoryService_PollMutableState_Helper.WrapResponse(value, err) + // value, err := MergeDLQMessages(args) + // result, err := HistoryService_MergeDLQMessages_Helper.WrapResponse(value, err) // if err != nil { - // return fmt.Errorf("unexpected error from PollMutableState: %v", err) + // return fmt.Errorf("unexpected error from MergeDLQMessages: %v", err) // } // serialize(result) - WrapResponse func(*PollMutableStateResponse, error) (*HistoryService_PollMutableState_Result, error) + WrapResponse func(*replicator.MergeDLQMessagesResponse, error) (*HistoryService_MergeDLQMessages_Result, error) - // UnwrapResponse takes the result struct for PollMutableState + // UnwrapResponse takes the result struct for MergeDLQMessages // and returns the value or error returned by it. // - // The error is non-nil only if PollMutableState threw an + // The error is non-nil only if MergeDLQMessages threw an // exception. 
// // result := deserialize(bytes) - // value, err := HistoryService_PollMutableState_Helper.UnwrapResponse(result) - UnwrapResponse func(*HistoryService_PollMutableState_Result) (*PollMutableStateResponse, error) + // value, err := HistoryService_MergeDLQMessages_Helper.UnwrapResponse(result) + UnwrapResponse func(*HistoryService_MergeDLQMessages_Result) (*replicator.MergeDLQMessagesResponse, error) }{} func init() { - HistoryService_PollMutableState_Helper.Args = func( - pollRequest *PollMutableStateRequest, - ) *HistoryService_PollMutableState_Args { - return &HistoryService_PollMutableState_Args{ - PollRequest: pollRequest, + HistoryService_MergeDLQMessages_Helper.Args = func( + request *replicator.MergeDLQMessagesRequest, + ) *HistoryService_MergeDLQMessages_Args { + return &HistoryService_MergeDLQMessages_Args{ + Request: request, } } - HistoryService_PollMutableState_Helper.IsException = func(err error) bool { + HistoryService_MergeDLQMessages_Helper.IsException = func(err error) bool { switch err.(type) { case *shared.BadRequestError: return true case *shared.InternalServiceError: return true + case *shared.ServiceBusyError: + return true case *shared.EntityNotExistsError: return true case *ShardOwnershipLostError: return true - case *shared.LimitExceededError: - return true - case *shared.ServiceBusyError: - return true - case *shared.CurrentBranchChangedError: - return true default: return false } } - HistoryService_PollMutableState_Helper.WrapResponse = func(success *PollMutableStateResponse, err error) (*HistoryService_PollMutableState_Result, error) { + HistoryService_MergeDLQMessages_Helper.WrapResponse = func(success *replicator.MergeDLQMessagesResponse, err error) (*HistoryService_MergeDLQMessages_Result, error) { if err == nil { - return &HistoryService_PollMutableState_Result{Success: success}, nil + return &HistoryService_MergeDLQMessages_Result{Success: success}, nil } switch e := err.(type) { case *shared.BadRequestError: if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.BadRequestError") + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_MergeDLQMessages_Result.BadRequestError") } - return &HistoryService_PollMutableState_Result{BadRequestError: e}, nil + return &HistoryService_MergeDLQMessages_Result{BadRequestError: e}, nil case *shared.InternalServiceError: if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.InternalServiceError") - } - return &HistoryService_PollMutableState_Result{InternalServiceError: e}, nil - case *shared.EntityNotExistsError: - if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.EntityNotExistError") - } - return &HistoryService_PollMutableState_Result{EntityNotExistError: e}, nil - case *ShardOwnershipLostError: - if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.ShardOwnershipLostError") + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_MergeDLQMessages_Result.InternalServiceError") } - return &HistoryService_PollMutableState_Result{ShardOwnershipLostError: e}, nil - case *shared.LimitExceededError: + return &HistoryService_MergeDLQMessages_Result{InternalServiceError: e}, nil + case 
*shared.ServiceBusyError: if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.LimitExceededError") + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_MergeDLQMessages_Result.ServiceBusyError") } - return &HistoryService_PollMutableState_Result{LimitExceededError: e}, nil - case *shared.ServiceBusyError: + return &HistoryService_MergeDLQMessages_Result{ServiceBusyError: e}, nil + case *shared.EntityNotExistsError: if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.ServiceBusyError") + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_MergeDLQMessages_Result.EntityNotExistError") } - return &HistoryService_PollMutableState_Result{ServiceBusyError: e}, nil - case *shared.CurrentBranchChangedError: + return &HistoryService_MergeDLQMessages_Result{EntityNotExistError: e}, nil + case *ShardOwnershipLostError: if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.CurrentBranchChangedError") + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_MergeDLQMessages_Result.ShardOwnershipLostError") } - return &HistoryService_PollMutableState_Result{CurrentBranchChangedError: e}, nil + return &HistoryService_MergeDLQMessages_Result{ShardOwnershipLostError: e}, nil } return nil, err } - HistoryService_PollMutableState_Helper.UnwrapResponse = func(result *HistoryService_PollMutableState_Result) (success *PollMutableStateResponse, err error) { + HistoryService_MergeDLQMessages_Helper.UnwrapResponse = func(result *HistoryService_MergeDLQMessages_Result) (success *replicator.MergeDLQMessagesResponse, err error) { if result.BadRequestError != nil { err = result.BadRequestError return @@ -17796,6 +17782,10 @@ func init() { err = result.InternalServiceError return } + if result.ServiceBusyError != nil { + err = result.ServiceBusyError + return + } if result.EntityNotExistError != nil { err = result.EntityNotExistError return @@ -17804,18 +17794,6 @@ func init() { err = result.ShardOwnershipLostError return } - if result.LimitExceededError != nil { - err = result.LimitExceededError - return - } - if result.ServiceBusyError != nil { - err = result.ServiceBusyError - return - } - if result.CurrentBranchChangedError != nil { - err = result.CurrentBranchChangedError - return - } if result.Success != nil { success = result.Success @@ -17828,24 +17806,22 @@ func init() { } -// HistoryService_PollMutableState_Result represents the result of a HistoryService.PollMutableState function call. +// HistoryService_MergeDLQMessages_Result represents the result of a HistoryService.MergeDLQMessages function call. // -// The result of a PollMutableState execution is sent and received over the wire as this struct. +// The result of a MergeDLQMessages execution is sent and received over the wire as this struct. // // Success is set only if the function did not throw an exception. -type HistoryService_PollMutableState_Result struct { - // Value returned by PollMutableState after a successful execution. 
- Success *PollMutableStateResponse `json:"success,omitempty"` - BadRequestError *shared.BadRequestError `json:"badRequestError,omitempty"` - InternalServiceError *shared.InternalServiceError `json:"internalServiceError,omitempty"` - EntityNotExistError *shared.EntityNotExistsError `json:"entityNotExistError,omitempty"` - ShardOwnershipLostError *ShardOwnershipLostError `json:"shardOwnershipLostError,omitempty"` - LimitExceededError *shared.LimitExceededError `json:"limitExceededError,omitempty"` - ServiceBusyError *shared.ServiceBusyError `json:"serviceBusyError,omitempty"` - CurrentBranchChangedError *shared.CurrentBranchChangedError `json:"currentBranchChangedError,omitempty"` +type HistoryService_MergeDLQMessages_Result struct { + // Value returned by MergeDLQMessages after a successful execution. + Success *replicator.MergeDLQMessagesResponse `json:"success,omitempty"` + BadRequestError *shared.BadRequestError `json:"badRequestError,omitempty"` + InternalServiceError *shared.InternalServiceError `json:"internalServiceError,omitempty"` + ServiceBusyError *shared.ServiceBusyError `json:"serviceBusyError,omitempty"` + EntityNotExistError *shared.EntityNotExistsError `json:"entityNotExistError,omitempty"` + ShardOwnershipLostError *ShardOwnershipLostError `json:"shardOwnershipLostError,omitempty"` } -// ToWire translates a HistoryService_PollMutableState_Result struct into a Thrift-level intermediate +// ToWire translates a HistoryService_MergeDLQMessages_Result struct into a Thrift-level intermediate // representation. This intermediate representation may be serialized // into bytes using a ThriftRW protocol implementation. // @@ -17860,9 +17836,9 @@ type HistoryService_PollMutableState_Result struct { // if err := binaryProtocol.Encode(x, writer); err != nil { // return err // } -func (v *HistoryService_PollMutableState_Result) ToWire() (wire.Value, error) { +func (v *HistoryService_MergeDLQMessages_Result) ToWire() (wire.Value, error) { var ( - fields [8]wire.Field + fields [6]wire.Field i int = 0 w wire.Value err error @@ -17892,65 +17868,49 @@ func (v *HistoryService_PollMutableState_Result) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 2, Value: w} i++ } - if v.EntityNotExistError != nil { - w, err = v.EntityNotExistError.ToWire() + if v.ServiceBusyError != nil { + w, err = v.ServiceBusyError.ToWire() if err != nil { return w, err } fields[i] = wire.Field{ID: 3, Value: w} i++ } - if v.ShardOwnershipLostError != nil { - w, err = v.ShardOwnershipLostError.ToWire() + if v.EntityNotExistError != nil { + w, err = v.EntityNotExistError.ToWire() if err != nil { return w, err } fields[i] = wire.Field{ID: 4, Value: w} i++ } - if v.LimitExceededError != nil { - w, err = v.LimitExceededError.ToWire() + if v.ShardOwnershipLostError != nil { + w, err = v.ShardOwnershipLostError.ToWire() if err != nil { return w, err } fields[i] = wire.Field{ID: 5, Value: w} i++ } - if v.ServiceBusyError != nil { - w, err = v.ServiceBusyError.ToWire() - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 6, Value: w} - i++ - } - if v.CurrentBranchChangedError != nil { - w, err = v.CurrentBranchChangedError.ToWire() - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 7, Value: w} - i++ - } if i != 1 { - return wire.Value{}, fmt.Errorf("HistoryService_PollMutableState_Result should have exactly one field: got %v fields", i) + return wire.Value{}, fmt.Errorf("HistoryService_MergeDLQMessages_Result should have exactly one field: got %v fields", i) } return 
wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } -func _PollMutableStateResponse_Read(w wire.Value) (*PollMutableStateResponse, error) { - var v PollMutableStateResponse +func _MergeDLQMessagesResponse_Read(w wire.Value) (*replicator.MergeDLQMessagesResponse, error) { + var v replicator.MergeDLQMessagesResponse err := v.FromWire(w) return &v, err } -// FromWire deserializes a HistoryService_PollMutableState_Result struct from its Thrift-level +// FromWire deserializes a HistoryService_MergeDLQMessages_Result struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. // -// An error is returned if we were unable to build a HistoryService_PollMutableState_Result struct +// An error is returned if we were unable to build a HistoryService_MergeDLQMessages_Result struct // from the provided intermediate representation. // // x, err := binaryProtocol.Decode(reader, wire.TStruct) @@ -17958,19 +17918,19 @@ func _PollMutableStateResponse_Read(w wire.Value) (*PollMutableStateResponse, er // return nil, err // } // -// var v HistoryService_PollMutableState_Result +// var v HistoryService_MergeDLQMessages_Result // if err := v.FromWire(x); err != nil { // return nil, err // } // return &v, nil -func (v *HistoryService_PollMutableState_Result) FromWire(w wire.Value) error { +func (v *HistoryService_MergeDLQMessages_Result) FromWire(w wire.Value) error { var err error for _, field := range w.GetStruct().Fields { switch field.ID { case 0: if field.Value.Type() == wire.TStruct { - v.Success, err = _PollMutableStateResponse_Read(field.Value) + v.Success, err = _MergeDLQMessagesResponse_Read(field.Value) if err != nil { return err } @@ -17994,7 +17954,7 @@ func (v *HistoryService_PollMutableState_Result) FromWire(w wire.Value) error { } case 3: if field.Value.Type() == wire.TStruct { - v.EntityNotExistError, err = _EntityNotExistsError_Read(field.Value) + v.ServiceBusyError, err = _ServiceBusyError_Read(field.Value) if err != nil { return err } @@ -18002,7 +17962,7 @@ func (v *HistoryService_PollMutableState_Result) FromWire(w wire.Value) error { } case 4: if field.Value.Type() == wire.TStruct { - v.ShardOwnershipLostError, err = _ShardOwnershipLostError_Read(field.Value) + v.EntityNotExistError, err = _EntityNotExistsError_Read(field.Value) if err != nil { return err } @@ -18010,23 +17970,7 @@ func (v *HistoryService_PollMutableState_Result) FromWire(w wire.Value) error { } case 5: if field.Value.Type() == wire.TStruct { - v.LimitExceededError, err = _LimitExceededError_Read(field.Value) - if err != nil { - return err - } - - } - case 6: - if field.Value.Type() == wire.TStruct { - v.ServiceBusyError, err = _ServiceBusyError_Read(field.Value) - if err != nil { - return err - } - - } - case 7: - if field.Value.Type() == wire.TStruct { - v.CurrentBranchChangedError, err = _CurrentBranchChangedError_Read(field.Value) + v.ShardOwnershipLostError, err = _ShardOwnershipLostError_Read(field.Value) if err != nil { return err } @@ -18045,36 +17989,30 @@ func (v *HistoryService_PollMutableState_Result) FromWire(w wire.Value) error { if v.InternalServiceError != nil { count++ } - if v.EntityNotExistError != nil { - count++ - } - if v.ShardOwnershipLostError != nil { - count++ - } - if v.LimitExceededError != nil { + if v.ServiceBusyError != nil { count++ } - if v.ServiceBusyError != nil { + if v.EntityNotExistError != nil { count++ } - if v.CurrentBranchChangedError != nil { + if v.ShardOwnershipLostError != nil { count++ } if 
count != 1 { - return fmt.Errorf("HistoryService_PollMutableState_Result should have exactly one field: got %v fields", count) + return fmt.Errorf("HistoryService_MergeDLQMessages_Result should have exactly one field: got %v fields", count) } return nil } -// String returns a readable string representation of a HistoryService_PollMutableState_Result +// String returns a readable string representation of a HistoryService_MergeDLQMessages_Result // struct. -func (v *HistoryService_PollMutableState_Result) String() string { +func (v *HistoryService_MergeDLQMessages_Result) String() string { if v == nil { return "" } - var fields [8]string + var fields [6]string i := 0 if v.Success != nil { fields[i] = fmt.Sprintf("Success: %v", v.Success) @@ -18088,6 +18026,10 @@ func (v *HistoryService_PollMutableState_Result) String() string { fields[i] = fmt.Sprintf("InternalServiceError: %v", v.InternalServiceError) i++ } + if v.ServiceBusyError != nil { + fields[i] = fmt.Sprintf("ServiceBusyError: %v", v.ServiceBusyError) + i++ + } if v.EntityNotExistError != nil { fields[i] = fmt.Sprintf("EntityNotExistError: %v", v.EntityNotExistError) i++ @@ -18096,27 +18038,15 @@ func (v *HistoryService_PollMutableState_Result) String() string { fields[i] = fmt.Sprintf("ShardOwnershipLostError: %v", v.ShardOwnershipLostError) i++ } - if v.LimitExceededError != nil { - fields[i] = fmt.Sprintf("LimitExceededError: %v", v.LimitExceededError) - i++ - } - if v.ServiceBusyError != nil { - fields[i] = fmt.Sprintf("ServiceBusyError: %v", v.ServiceBusyError) - i++ - } - if v.CurrentBranchChangedError != nil { - fields[i] = fmt.Sprintf("CurrentBranchChangedError: %v", v.CurrentBranchChangedError) - i++ - } - return fmt.Sprintf("HistoryService_PollMutableState_Result{%v}", strings.Join(fields[:i], ", ")) + return fmt.Sprintf("HistoryService_MergeDLQMessages_Result{%v}", strings.Join(fields[:i], ", ")) } -// Equals returns true if all the fields of this HistoryService_PollMutableState_Result match the -// provided HistoryService_PollMutableState_Result. +// Equals returns true if all the fields of this HistoryService_MergeDLQMessages_Result match the +// provided HistoryService_MergeDLQMessages_Result. // // This function performs a deep comparison. 
-func (v *HistoryService_PollMutableState_Result) Equals(rhs *HistoryService_PollMutableState_Result) bool { +func (v *HistoryService_MergeDLQMessages_Result) Equals(rhs *HistoryService_MergeDLQMessages_Result) bool { if v == nil { return rhs == nil } else if rhs == nil { @@ -18131,19 +18061,13 @@ func (v *HistoryService_PollMutableState_Result) Equals(rhs *HistoryService_Poll if !((v.InternalServiceError == nil && rhs.InternalServiceError == nil) || (v.InternalServiceError != nil && rhs.InternalServiceError != nil && v.InternalServiceError.Equals(rhs.InternalServiceError))) { return false } - if !((v.EntityNotExistError == nil && rhs.EntityNotExistError == nil) || (v.EntityNotExistError != nil && rhs.EntityNotExistError != nil && v.EntityNotExistError.Equals(rhs.EntityNotExistError))) { - return false - } - if !((v.ShardOwnershipLostError == nil && rhs.ShardOwnershipLostError == nil) || (v.ShardOwnershipLostError != nil && rhs.ShardOwnershipLostError != nil && v.ShardOwnershipLostError.Equals(rhs.ShardOwnershipLostError))) { - return false - } - if !((v.LimitExceededError == nil && rhs.LimitExceededError == nil) || (v.LimitExceededError != nil && rhs.LimitExceededError != nil && v.LimitExceededError.Equals(rhs.LimitExceededError))) { + if !((v.ServiceBusyError == nil && rhs.ServiceBusyError == nil) || (v.ServiceBusyError != nil && rhs.ServiceBusyError != nil && v.ServiceBusyError.Equals(rhs.ServiceBusyError))) { return false } - if !((v.ServiceBusyError == nil && rhs.ServiceBusyError == nil) || (v.ServiceBusyError != nil && rhs.ServiceBusyError != nil && v.ServiceBusyError.Equals(rhs.ServiceBusyError))) { + if !((v.EntityNotExistError == nil && rhs.EntityNotExistError == nil) || (v.EntityNotExistError != nil && rhs.EntityNotExistError != nil && v.EntityNotExistError.Equals(rhs.EntityNotExistError))) { return false } - if !((v.CurrentBranchChangedError == nil && rhs.CurrentBranchChangedError == nil) || (v.CurrentBranchChangedError != nil && rhs.CurrentBranchChangedError != nil && v.CurrentBranchChangedError.Equals(rhs.CurrentBranchChangedError))) { + if !((v.ShardOwnershipLostError == nil && rhs.ShardOwnershipLostError == nil) || (v.ShardOwnershipLostError != nil && rhs.ShardOwnershipLostError != nil && v.ShardOwnershipLostError.Equals(rhs.ShardOwnershipLostError))) { return false } @@ -18151,8 +18075,8 @@ func (v *HistoryService_PollMutableState_Result) Equals(rhs *HistoryService_Poll } // MarshalLogObject implements zapcore.ObjectMarshaler, enabling -// fast logging of HistoryService_PollMutableState_Result. -func (v *HistoryService_PollMutableState_Result) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { +// fast logging of HistoryService_MergeDLQMessages_Result. 
+func (v *HistoryService_MergeDLQMessages_Result) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { if v == nil { return nil } @@ -18165,11 +18089,2242 @@ func (v *HistoryService_PollMutableState_Result) MarshalLogObject(enc zapcore.Ob if v.InternalServiceError != nil { err = multierr.Append(err, enc.AddObject("internalServiceError", v.InternalServiceError)) } - if v.EntityNotExistError != nil { + if v.ServiceBusyError != nil { + err = multierr.Append(err, enc.AddObject("serviceBusyError", v.ServiceBusyError)) + } + if v.EntityNotExistError != nil { + err = multierr.Append(err, enc.AddObject("entityNotExistError", v.EntityNotExistError)) + } + if v.ShardOwnershipLostError != nil { + err = multierr.Append(err, enc.AddObject("shardOwnershipLostError", v.ShardOwnershipLostError)) + } + return err +} + +// GetSuccess returns the value of Success if it is set or its +// zero value if it is unset. +func (v *HistoryService_MergeDLQMessages_Result) GetSuccess() (o *replicator.MergeDLQMessagesResponse) { + if v != nil && v.Success != nil { + return v.Success + } + + return +} + +// IsSetSuccess returns true if Success is not nil. +func (v *HistoryService_MergeDLQMessages_Result) IsSetSuccess() bool { + return v != nil && v.Success != nil +} + +// GetBadRequestError returns the value of BadRequestError if it is set or its +// zero value if it is unset. +func (v *HistoryService_MergeDLQMessages_Result) GetBadRequestError() (o *shared.BadRequestError) { + if v != nil && v.BadRequestError != nil { + return v.BadRequestError + } + + return +} + +// IsSetBadRequestError returns true if BadRequestError is not nil. +func (v *HistoryService_MergeDLQMessages_Result) IsSetBadRequestError() bool { + return v != nil && v.BadRequestError != nil +} + +// GetInternalServiceError returns the value of InternalServiceError if it is set or its +// zero value if it is unset. +func (v *HistoryService_MergeDLQMessages_Result) GetInternalServiceError() (o *shared.InternalServiceError) { + if v != nil && v.InternalServiceError != nil { + return v.InternalServiceError + } + + return +} + +// IsSetInternalServiceError returns true if InternalServiceError is not nil. +func (v *HistoryService_MergeDLQMessages_Result) IsSetInternalServiceError() bool { + return v != nil && v.InternalServiceError != nil +} + +// GetServiceBusyError returns the value of ServiceBusyError if it is set or its +// zero value if it is unset. +func (v *HistoryService_MergeDLQMessages_Result) GetServiceBusyError() (o *shared.ServiceBusyError) { + if v != nil && v.ServiceBusyError != nil { + return v.ServiceBusyError + } + + return +} + +// IsSetServiceBusyError returns true if ServiceBusyError is not nil. +func (v *HistoryService_MergeDLQMessages_Result) IsSetServiceBusyError() bool { + return v != nil && v.ServiceBusyError != nil +} + +// GetEntityNotExistError returns the value of EntityNotExistError if it is set or its +// zero value if it is unset. +func (v *HistoryService_MergeDLQMessages_Result) GetEntityNotExistError() (o *shared.EntityNotExistsError) { + if v != nil && v.EntityNotExistError != nil { + return v.EntityNotExistError + } + + return +} + +// IsSetEntityNotExistError returns true if EntityNotExistError is not nil. +func (v *HistoryService_MergeDLQMessages_Result) IsSetEntityNotExistError() bool { + return v != nil && v.EntityNotExistError != nil +} + +// GetShardOwnershipLostError returns the value of ShardOwnershipLostError if it is set or its +// zero value if it is unset. 
+func (v *HistoryService_MergeDLQMessages_Result) GetShardOwnershipLostError() (o *ShardOwnershipLostError) { + if v != nil && v.ShardOwnershipLostError != nil { + return v.ShardOwnershipLostError + } + + return +} + +// IsSetShardOwnershipLostError returns true if ShardOwnershipLostError is not nil. +func (v *HistoryService_MergeDLQMessages_Result) IsSetShardOwnershipLostError() bool { + return v != nil && v.ShardOwnershipLostError != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the result. +// +// This will always be "MergeDLQMessages" for this struct. +func (v *HistoryService_MergeDLQMessages_Result) MethodName() string { + return "MergeDLQMessages" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Reply for this struct. +func (v *HistoryService_MergeDLQMessages_Result) EnvelopeType() wire.EnvelopeType { + return wire.Reply +} + +// HistoryService_PollMutableState_Args represents the arguments for the HistoryService.PollMutableState function. +// +// The arguments for PollMutableState are sent and received over the wire as this struct. +type HistoryService_PollMutableState_Args struct { + PollRequest *PollMutableStateRequest `json:"pollRequest,omitempty"` +} + +// ToWire translates a HistoryService_PollMutableState_Args struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *HistoryService_PollMutableState_Args) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.PollRequest != nil { + w, err = v.PollRequest.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _PollMutableStateRequest_Read(w wire.Value) (*PollMutableStateRequest, error) { + var v PollMutableStateRequest + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a HistoryService_PollMutableState_Args struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a HistoryService_PollMutableState_Args struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v HistoryService_PollMutableState_Args +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *HistoryService_PollMutableState_Args) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 1: + if field.Value.Type() == wire.TStruct { + v.PollRequest, err = _PollMutableStateRequest_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a HistoryService_PollMutableState_Args +// struct. 
+func (v *HistoryService_PollMutableState_Args) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.PollRequest != nil { + fields[i] = fmt.Sprintf("PollRequest: %v", v.PollRequest) + i++ + } + + return fmt.Sprintf("HistoryService_PollMutableState_Args{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this HistoryService_PollMutableState_Args match the +// provided HistoryService_PollMutableState_Args. +// +// This function performs a deep comparison. +func (v *HistoryService_PollMutableState_Args) Equals(rhs *HistoryService_PollMutableState_Args) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.PollRequest == nil && rhs.PollRequest == nil) || (v.PollRequest != nil && rhs.PollRequest != nil && v.PollRequest.Equals(rhs.PollRequest))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of HistoryService_PollMutableState_Args. +func (v *HistoryService_PollMutableState_Args) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.PollRequest != nil { + err = multierr.Append(err, enc.AddObject("pollRequest", v.PollRequest)) + } + return err +} + +// GetPollRequest returns the value of PollRequest if it is set or its +// zero value if it is unset. +func (v *HistoryService_PollMutableState_Args) GetPollRequest() (o *PollMutableStateRequest) { + if v != nil && v.PollRequest != nil { + return v.PollRequest + } + + return +} + +// IsSetPollRequest returns true if PollRequest is not nil. +func (v *HistoryService_PollMutableState_Args) IsSetPollRequest() bool { + return v != nil && v.PollRequest != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the arguments. +// +// This will always be "PollMutableState" for this struct. +func (v *HistoryService_PollMutableState_Args) MethodName() string { + return "PollMutableState" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Call for this struct. +func (v *HistoryService_PollMutableState_Args) EnvelopeType() wire.EnvelopeType { + return wire.Call +} + +// HistoryService_PollMutableState_Helper provides functions that aid in handling the +// parameters and return values of the HistoryService.PollMutableState +// function. +var HistoryService_PollMutableState_Helper = struct { + // Args accepts the parameters of PollMutableState in-order and returns + // the arguments struct for the function. + Args func( + pollRequest *PollMutableStateRequest, + ) *HistoryService_PollMutableState_Args + + // IsException returns true if the given error can be thrown + // by PollMutableState. + // + // An error can be thrown by PollMutableState only if the + // corresponding exception type was mentioned in the 'throws' + // section for it in the Thrift file. + IsException func(error) bool + + // WrapResponse returns the result struct for PollMutableState + // given its return value and error. + // + // This allows mapping values and errors returned by + // PollMutableState into a serializable result struct. 
+ // WrapResponse returns a non-nil error if the provided + // error cannot be thrown by PollMutableState + // + // value, err := PollMutableState(args) + // result, err := HistoryService_PollMutableState_Helper.WrapResponse(value, err) + // if err != nil { + // return fmt.Errorf("unexpected error from PollMutableState: %v", err) + // } + // serialize(result) + WrapResponse func(*PollMutableStateResponse, error) (*HistoryService_PollMutableState_Result, error) + + // UnwrapResponse takes the result struct for PollMutableState + // and returns the value or error returned by it. + // + // The error is non-nil only if PollMutableState threw an + // exception. + // + // result := deserialize(bytes) + // value, err := HistoryService_PollMutableState_Helper.UnwrapResponse(result) + UnwrapResponse func(*HistoryService_PollMutableState_Result) (*PollMutableStateResponse, error) +}{} + +func init() { + HistoryService_PollMutableState_Helper.Args = func( + pollRequest *PollMutableStateRequest, + ) *HistoryService_PollMutableState_Args { + return &HistoryService_PollMutableState_Args{ + PollRequest: pollRequest, + } + } + + HistoryService_PollMutableState_Helper.IsException = func(err error) bool { + switch err.(type) { + case *shared.BadRequestError: + return true + case *shared.InternalServiceError: + return true + case *shared.EntityNotExistsError: + return true + case *ShardOwnershipLostError: + return true + case *shared.LimitExceededError: + return true + case *shared.ServiceBusyError: + return true + case *shared.CurrentBranchChangedError: + return true + default: + return false + } + } + + HistoryService_PollMutableState_Helper.WrapResponse = func(success *PollMutableStateResponse, err error) (*HistoryService_PollMutableState_Result, error) { + if err == nil { + return &HistoryService_PollMutableState_Result{Success: success}, nil + } + + switch e := err.(type) { + case *shared.BadRequestError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.BadRequestError") + } + return &HistoryService_PollMutableState_Result{BadRequestError: e}, nil + case *shared.InternalServiceError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.InternalServiceError") + } + return &HistoryService_PollMutableState_Result{InternalServiceError: e}, nil + case *shared.EntityNotExistsError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.EntityNotExistError") + } + return &HistoryService_PollMutableState_Result{EntityNotExistError: e}, nil + case *ShardOwnershipLostError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.ShardOwnershipLostError") + } + return &HistoryService_PollMutableState_Result{ShardOwnershipLostError: e}, nil + case *shared.LimitExceededError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.LimitExceededError") + } + return &HistoryService_PollMutableState_Result{LimitExceededError: e}, nil + case *shared.ServiceBusyError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.ServiceBusyError") + } + return 
&HistoryService_PollMutableState_Result{ServiceBusyError: e}, nil + case *shared.CurrentBranchChangedError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PollMutableState_Result.CurrentBranchChangedError") + } + return &HistoryService_PollMutableState_Result{CurrentBranchChangedError: e}, nil + } + + return nil, err + } + HistoryService_PollMutableState_Helper.UnwrapResponse = func(result *HistoryService_PollMutableState_Result) (success *PollMutableStateResponse, err error) { + if result.BadRequestError != nil { + err = result.BadRequestError + return + } + if result.InternalServiceError != nil { + err = result.InternalServiceError + return + } + if result.EntityNotExistError != nil { + err = result.EntityNotExistError + return + } + if result.ShardOwnershipLostError != nil { + err = result.ShardOwnershipLostError + return + } + if result.LimitExceededError != nil { + err = result.LimitExceededError + return + } + if result.ServiceBusyError != nil { + err = result.ServiceBusyError + return + } + if result.CurrentBranchChangedError != nil { + err = result.CurrentBranchChangedError + return + } + + if result.Success != nil { + success = result.Success + return + } + + err = errors.New("expected a non-void result") + return + } + +} + +// HistoryService_PollMutableState_Result represents the result of a HistoryService.PollMutableState function call. +// +// The result of a PollMutableState execution is sent and received over the wire as this struct. +// +// Success is set only if the function did not throw an exception. +type HistoryService_PollMutableState_Result struct { + // Value returned by PollMutableState after a successful execution. + Success *PollMutableStateResponse `json:"success,omitempty"` + BadRequestError *shared.BadRequestError `json:"badRequestError,omitempty"` + InternalServiceError *shared.InternalServiceError `json:"internalServiceError,omitempty"` + EntityNotExistError *shared.EntityNotExistsError `json:"entityNotExistError,omitempty"` + ShardOwnershipLostError *ShardOwnershipLostError `json:"shardOwnershipLostError,omitempty"` + LimitExceededError *shared.LimitExceededError `json:"limitExceededError,omitempty"` + ServiceBusyError *shared.ServiceBusyError `json:"serviceBusyError,omitempty"` + CurrentBranchChangedError *shared.CurrentBranchChangedError `json:"currentBranchChangedError,omitempty"` +} + +// ToWire translates a HistoryService_PollMutableState_Result struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *HistoryService_PollMutableState_Result) ToWire() (wire.Value, error) { + var ( + fields [8]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Success != nil { + w, err = v.Success.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 0, Value: w} + i++ + } + if v.BadRequestError != nil { + w, err = v.BadRequestError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + if v.InternalServiceError != nil { + w, err = v.InternalServiceError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 2, Value: w} + i++ + } + if v.EntityNotExistError != nil { + w, err = v.EntityNotExistError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 3, Value: w} + i++ + } + if v.ShardOwnershipLostError != nil { + w, err = v.ShardOwnershipLostError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 4, Value: w} + i++ + } + if v.LimitExceededError != nil { + w, err = v.LimitExceededError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 5, Value: w} + i++ + } + if v.ServiceBusyError != nil { + w, err = v.ServiceBusyError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 6, Value: w} + i++ + } + if v.CurrentBranchChangedError != nil { + w, err = v.CurrentBranchChangedError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 7, Value: w} + i++ + } + + if i != 1 { + return wire.Value{}, fmt.Errorf("HistoryService_PollMutableState_Result should have exactly one field: got %v fields", i) + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _PollMutableStateResponse_Read(w wire.Value) (*PollMutableStateResponse, error) { + var v PollMutableStateResponse + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a HistoryService_PollMutableState_Result struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a HistoryService_PollMutableState_Result struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v HistoryService_PollMutableState_Result +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *HistoryService_PollMutableState_Result) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 0: + if field.Value.Type() == wire.TStruct { + v.Success, err = _PollMutableStateResponse_Read(field.Value) + if err != nil { + return err + } + + } + case 1: + if field.Value.Type() == wire.TStruct { + v.BadRequestError, err = _BadRequestError_Read(field.Value) + if err != nil { + return err + } + + } + case 2: + if field.Value.Type() == wire.TStruct { + v.InternalServiceError, err = _InternalServiceError_Read(field.Value) + if err != nil { + return err + } + + } + case 3: + if field.Value.Type() == wire.TStruct { + v.EntityNotExistError, err = _EntityNotExistsError_Read(field.Value) + if err != nil { + return err + } + + } + case 4: + if field.Value.Type() == wire.TStruct { + v.ShardOwnershipLostError, err = _ShardOwnershipLostError_Read(field.Value) + if err != nil { + return err + } + + } + case 5: + if field.Value.Type() == wire.TStruct { + v.LimitExceededError, err = _LimitExceededError_Read(field.Value) + if err != nil { + return err + } + + } + case 6: + if field.Value.Type() == wire.TStruct { + v.ServiceBusyError, err = _ServiceBusyError_Read(field.Value) + if err != nil { + return err + } + + } + case 7: + if field.Value.Type() == wire.TStruct { + v.CurrentBranchChangedError, err = _CurrentBranchChangedError_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + count := 0 + if v.Success != nil { + count++ + } + if v.BadRequestError != nil { + count++ + } + if v.InternalServiceError != nil { + count++ + } + if v.EntityNotExistError != nil { + count++ + } + if v.ShardOwnershipLostError != nil { + count++ + } + if v.LimitExceededError != nil { + count++ + } + if v.ServiceBusyError != nil { + count++ + } + if v.CurrentBranchChangedError != nil { + count++ + } + if count != 1 { + return fmt.Errorf("HistoryService_PollMutableState_Result should have exactly one field: got %v fields", count) + } + + return nil +} + +// String returns a readable string representation of a HistoryService_PollMutableState_Result +// struct. 
+func (v *HistoryService_PollMutableState_Result) String() string { + if v == nil { + return "" + } + + var fields [8]string + i := 0 + if v.Success != nil { + fields[i] = fmt.Sprintf("Success: %v", v.Success) + i++ + } + if v.BadRequestError != nil { + fields[i] = fmt.Sprintf("BadRequestError: %v", v.BadRequestError) + i++ + } + if v.InternalServiceError != nil { + fields[i] = fmt.Sprintf("InternalServiceError: %v", v.InternalServiceError) + i++ + } + if v.EntityNotExistError != nil { + fields[i] = fmt.Sprintf("EntityNotExistError: %v", v.EntityNotExistError) + i++ + } + if v.ShardOwnershipLostError != nil { + fields[i] = fmt.Sprintf("ShardOwnershipLostError: %v", v.ShardOwnershipLostError) + i++ + } + if v.LimitExceededError != nil { + fields[i] = fmt.Sprintf("LimitExceededError: %v", v.LimitExceededError) + i++ + } + if v.ServiceBusyError != nil { + fields[i] = fmt.Sprintf("ServiceBusyError: %v", v.ServiceBusyError) + i++ + } + if v.CurrentBranchChangedError != nil { + fields[i] = fmt.Sprintf("CurrentBranchChangedError: %v", v.CurrentBranchChangedError) + i++ + } + + return fmt.Sprintf("HistoryService_PollMutableState_Result{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this HistoryService_PollMutableState_Result match the +// provided HistoryService_PollMutableState_Result. +// +// This function performs a deep comparison. +func (v *HistoryService_PollMutableState_Result) Equals(rhs *HistoryService_PollMutableState_Result) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.Success == nil && rhs.Success == nil) || (v.Success != nil && rhs.Success != nil && v.Success.Equals(rhs.Success))) { + return false + } + if !((v.BadRequestError == nil && rhs.BadRequestError == nil) || (v.BadRequestError != nil && rhs.BadRequestError != nil && v.BadRequestError.Equals(rhs.BadRequestError))) { + return false + } + if !((v.InternalServiceError == nil && rhs.InternalServiceError == nil) || (v.InternalServiceError != nil && rhs.InternalServiceError != nil && v.InternalServiceError.Equals(rhs.InternalServiceError))) { + return false + } + if !((v.EntityNotExistError == nil && rhs.EntityNotExistError == nil) || (v.EntityNotExistError != nil && rhs.EntityNotExistError != nil && v.EntityNotExistError.Equals(rhs.EntityNotExistError))) { + return false + } + if !((v.ShardOwnershipLostError == nil && rhs.ShardOwnershipLostError == nil) || (v.ShardOwnershipLostError != nil && rhs.ShardOwnershipLostError != nil && v.ShardOwnershipLostError.Equals(rhs.ShardOwnershipLostError))) { + return false + } + if !((v.LimitExceededError == nil && rhs.LimitExceededError == nil) || (v.LimitExceededError != nil && rhs.LimitExceededError != nil && v.LimitExceededError.Equals(rhs.LimitExceededError))) { + return false + } + if !((v.ServiceBusyError == nil && rhs.ServiceBusyError == nil) || (v.ServiceBusyError != nil && rhs.ServiceBusyError != nil && v.ServiceBusyError.Equals(rhs.ServiceBusyError))) { + return false + } + if !((v.CurrentBranchChangedError == nil && rhs.CurrentBranchChangedError == nil) || (v.CurrentBranchChangedError != nil && rhs.CurrentBranchChangedError != nil && v.CurrentBranchChangedError.Equals(rhs.CurrentBranchChangedError))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of HistoryService_PollMutableState_Result. 
+func (v *HistoryService_PollMutableState_Result) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Success != nil { + err = multierr.Append(err, enc.AddObject("success", v.Success)) + } + if v.BadRequestError != nil { + err = multierr.Append(err, enc.AddObject("badRequestError", v.BadRequestError)) + } + if v.InternalServiceError != nil { + err = multierr.Append(err, enc.AddObject("internalServiceError", v.InternalServiceError)) + } + if v.EntityNotExistError != nil { + err = multierr.Append(err, enc.AddObject("entityNotExistError", v.EntityNotExistError)) + } + if v.ShardOwnershipLostError != nil { + err = multierr.Append(err, enc.AddObject("shardOwnershipLostError", v.ShardOwnershipLostError)) + } + if v.LimitExceededError != nil { + err = multierr.Append(err, enc.AddObject("limitExceededError", v.LimitExceededError)) + } + if v.ServiceBusyError != nil { + err = multierr.Append(err, enc.AddObject("serviceBusyError", v.ServiceBusyError)) + } + if v.CurrentBranchChangedError != nil { + err = multierr.Append(err, enc.AddObject("currentBranchChangedError", v.CurrentBranchChangedError)) + } + return err +} + +// GetSuccess returns the value of Success if it is set or its +// zero value if it is unset. +func (v *HistoryService_PollMutableState_Result) GetSuccess() (o *PollMutableStateResponse) { + if v != nil && v.Success != nil { + return v.Success + } + + return +} + +// IsSetSuccess returns true if Success is not nil. +func (v *HistoryService_PollMutableState_Result) IsSetSuccess() bool { + return v != nil && v.Success != nil +} + +// GetBadRequestError returns the value of BadRequestError if it is set or its +// zero value if it is unset. +func (v *HistoryService_PollMutableState_Result) GetBadRequestError() (o *shared.BadRequestError) { + if v != nil && v.BadRequestError != nil { + return v.BadRequestError + } + + return +} + +// IsSetBadRequestError returns true if BadRequestError is not nil. +func (v *HistoryService_PollMutableState_Result) IsSetBadRequestError() bool { + return v != nil && v.BadRequestError != nil +} + +// GetInternalServiceError returns the value of InternalServiceError if it is set or its +// zero value if it is unset. +func (v *HistoryService_PollMutableState_Result) GetInternalServiceError() (o *shared.InternalServiceError) { + if v != nil && v.InternalServiceError != nil { + return v.InternalServiceError + } + + return +} + +// IsSetInternalServiceError returns true if InternalServiceError is not nil. +func (v *HistoryService_PollMutableState_Result) IsSetInternalServiceError() bool { + return v != nil && v.InternalServiceError != nil +} + +// GetEntityNotExistError returns the value of EntityNotExistError if it is set or its +// zero value if it is unset. +func (v *HistoryService_PollMutableState_Result) GetEntityNotExistError() (o *shared.EntityNotExistsError) { + if v != nil && v.EntityNotExistError != nil { + return v.EntityNotExistError + } + + return +} + +// IsSetEntityNotExistError returns true if EntityNotExistError is not nil. +func (v *HistoryService_PollMutableState_Result) IsSetEntityNotExistError() bool { + return v != nil && v.EntityNotExistError != nil +} + +// GetShardOwnershipLostError returns the value of ShardOwnershipLostError if it is set or its +// zero value if it is unset. 
+func (v *HistoryService_PollMutableState_Result) GetShardOwnershipLostError() (o *ShardOwnershipLostError) { + if v != nil && v.ShardOwnershipLostError != nil { + return v.ShardOwnershipLostError + } + + return +} + +// IsSetShardOwnershipLostError returns true if ShardOwnershipLostError is not nil. +func (v *HistoryService_PollMutableState_Result) IsSetShardOwnershipLostError() bool { + return v != nil && v.ShardOwnershipLostError != nil +} + +// GetLimitExceededError returns the value of LimitExceededError if it is set or its +// zero value if it is unset. +func (v *HistoryService_PollMutableState_Result) GetLimitExceededError() (o *shared.LimitExceededError) { + if v != nil && v.LimitExceededError != nil { + return v.LimitExceededError + } + + return +} + +// IsSetLimitExceededError returns true if LimitExceededError is not nil. +func (v *HistoryService_PollMutableState_Result) IsSetLimitExceededError() bool { + return v != nil && v.LimitExceededError != nil +} + +// GetServiceBusyError returns the value of ServiceBusyError if it is set or its +// zero value if it is unset. +func (v *HistoryService_PollMutableState_Result) GetServiceBusyError() (o *shared.ServiceBusyError) { + if v != nil && v.ServiceBusyError != nil { + return v.ServiceBusyError + } + + return +} + +// IsSetServiceBusyError returns true if ServiceBusyError is not nil. +func (v *HistoryService_PollMutableState_Result) IsSetServiceBusyError() bool { + return v != nil && v.ServiceBusyError != nil +} + +// GetCurrentBranchChangedError returns the value of CurrentBranchChangedError if it is set or its +// zero value if it is unset. +func (v *HistoryService_PollMutableState_Result) GetCurrentBranchChangedError() (o *shared.CurrentBranchChangedError) { + if v != nil && v.CurrentBranchChangedError != nil { + return v.CurrentBranchChangedError + } + + return +} + +// IsSetCurrentBranchChangedError returns true if CurrentBranchChangedError is not nil. +func (v *HistoryService_PollMutableState_Result) IsSetCurrentBranchChangedError() bool { + return v != nil && v.CurrentBranchChangedError != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the result. +// +// This will always be "PollMutableState" for this struct. +func (v *HistoryService_PollMutableState_Result) MethodName() string { + return "PollMutableState" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Reply for this struct. +func (v *HistoryService_PollMutableState_Result) EnvelopeType() wire.EnvelopeType { + return wire.Reply +} + +// HistoryService_PurgeDLQMessages_Args represents the arguments for the HistoryService.PurgeDLQMessages function. +// +// The arguments for PurgeDLQMessages are sent and received over the wire as this struct. +type HistoryService_PurgeDLQMessages_Args struct { + Request *replicator.PurgeDLQMessagesRequest `json:"request,omitempty"` +} + +// ToWire translates a HistoryService_PurgeDLQMessages_Args struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *HistoryService_PurgeDLQMessages_Args) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Request != nil { + w, err = v.Request.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _PurgeDLQMessagesRequest_Read(w wire.Value) (*replicator.PurgeDLQMessagesRequest, error) { + var v replicator.PurgeDLQMessagesRequest + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a HistoryService_PurgeDLQMessages_Args struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a HistoryService_PurgeDLQMessages_Args struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v HistoryService_PurgeDLQMessages_Args +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *HistoryService_PurgeDLQMessages_Args) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 1: + if field.Value.Type() == wire.TStruct { + v.Request, err = _PurgeDLQMessagesRequest_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a HistoryService_PurgeDLQMessages_Args +// struct. +func (v *HistoryService_PurgeDLQMessages_Args) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.Request != nil { + fields[i] = fmt.Sprintf("Request: %v", v.Request) + i++ + } + + return fmt.Sprintf("HistoryService_PurgeDLQMessages_Args{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this HistoryService_PurgeDLQMessages_Args match the +// provided HistoryService_PurgeDLQMessages_Args. +// +// This function performs a deep comparison. +func (v *HistoryService_PurgeDLQMessages_Args) Equals(rhs *HistoryService_PurgeDLQMessages_Args) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.Request == nil && rhs.Request == nil) || (v.Request != nil && rhs.Request != nil && v.Request.Equals(rhs.Request))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of HistoryService_PurgeDLQMessages_Args. +func (v *HistoryService_PurgeDLQMessages_Args) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Request != nil { + err = multierr.Append(err, enc.AddObject("request", v.Request)) + } + return err +} + +// GetRequest returns the value of Request if it is set or its +// zero value if it is unset. +func (v *HistoryService_PurgeDLQMessages_Args) GetRequest() (o *replicator.PurgeDLQMessagesRequest) { + if v != nil && v.Request != nil { + return v.Request + } + + return +} + +// IsSetRequest returns true if Request is not nil. 
+func (v *HistoryService_PurgeDLQMessages_Args) IsSetRequest() bool { + return v != nil && v.Request != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represents the arguments. +// +// This will always be "PurgeDLQMessages" for this struct. +func (v *HistoryService_PurgeDLQMessages_Args) MethodName() string { + return "PurgeDLQMessages" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Call for this struct. +func (v *HistoryService_PurgeDLQMessages_Args) EnvelopeType() wire.EnvelopeType { + return wire.Call +} + +// HistoryService_PurgeDLQMessages_Helper provides functions that aid in handling the +// parameters and return values of the HistoryService.PurgeDLQMessages +// function. +var HistoryService_PurgeDLQMessages_Helper = struct { + // Args accepts the parameters of PurgeDLQMessages in-order and returns + // the arguments struct for the function. + Args func( + request *replicator.PurgeDLQMessagesRequest, + ) *HistoryService_PurgeDLQMessages_Args + + // IsException returns true if the given error can be thrown + // by PurgeDLQMessages. + // + // An error can be thrown by PurgeDLQMessages only if the + // corresponding exception type was mentioned in the 'throws' + // section for it in the Thrift file. + IsException func(error) bool + + // WrapResponse returns the result struct for PurgeDLQMessages + // given the error returned by it. The provided error may + // be nil if PurgeDLQMessages did not fail. + // + // This allows mapping errors returned by PurgeDLQMessages into a + // serializable result struct. WrapResponse returns a + // non-nil error if the provided error cannot be thrown by + // PurgeDLQMessages + // + // err := PurgeDLQMessages(args) + // result, err := HistoryService_PurgeDLQMessages_Helper.WrapResponse(err) + // if err != nil { + // return fmt.Errorf("unexpected error from PurgeDLQMessages: %v", err) + // } + // serialize(result) + WrapResponse func(error) (*HistoryService_PurgeDLQMessages_Result, error) + + // UnwrapResponse takes the result struct for PurgeDLQMessages + // and returns the error returned by it (if any). + // + // The error is non-nil only if PurgeDLQMessages threw an + // exception.
+ // + // result := deserialize(bytes) + // err := HistoryService_PurgeDLQMessages_Helper.UnwrapResponse(result) + UnwrapResponse func(*HistoryService_PurgeDLQMessages_Result) error +}{} + +func init() { + HistoryService_PurgeDLQMessages_Helper.Args = func( + request *replicator.PurgeDLQMessagesRequest, + ) *HistoryService_PurgeDLQMessages_Args { + return &HistoryService_PurgeDLQMessages_Args{ + Request: request, + } + } + + HistoryService_PurgeDLQMessages_Helper.IsException = func(err error) bool { + switch err.(type) { + case *shared.BadRequestError: + return true + case *shared.InternalServiceError: + return true + case *shared.ServiceBusyError: + return true + case *shared.EntityNotExistsError: + return true + case *ShardOwnershipLostError: + return true + default: + return false + } + } + + HistoryService_PurgeDLQMessages_Helper.WrapResponse = func(err error) (*HistoryService_PurgeDLQMessages_Result, error) { + if err == nil { + return &HistoryService_PurgeDLQMessages_Result{}, nil + } + + switch e := err.(type) { + case *shared.BadRequestError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PurgeDLQMessages_Result.BadRequestError") + } + return &HistoryService_PurgeDLQMessages_Result{BadRequestError: e}, nil + case *shared.InternalServiceError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PurgeDLQMessages_Result.InternalServiceError") + } + return &HistoryService_PurgeDLQMessages_Result{InternalServiceError: e}, nil + case *shared.ServiceBusyError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PurgeDLQMessages_Result.ServiceBusyError") + } + return &HistoryService_PurgeDLQMessages_Result{ServiceBusyError: e}, nil + case *shared.EntityNotExistsError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PurgeDLQMessages_Result.EntityNotExistError") + } + return &HistoryService_PurgeDLQMessages_Result{EntityNotExistError: e}, nil + case *ShardOwnershipLostError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_PurgeDLQMessages_Result.ShardOwnershipLostError") + } + return &HistoryService_PurgeDLQMessages_Result{ShardOwnershipLostError: e}, nil + } + + return nil, err + } + HistoryService_PurgeDLQMessages_Helper.UnwrapResponse = func(result *HistoryService_PurgeDLQMessages_Result) (err error) { + if result.BadRequestError != nil { + err = result.BadRequestError + return + } + if result.InternalServiceError != nil { + err = result.InternalServiceError + return + } + if result.ServiceBusyError != nil { + err = result.ServiceBusyError + return + } + if result.EntityNotExistError != nil { + err = result.EntityNotExistError + return + } + if result.ShardOwnershipLostError != nil { + err = result.ShardOwnershipLostError + return + } + return + } + +} + +// HistoryService_PurgeDLQMessages_Result represents the result of a HistoryService.PurgeDLQMessages function call. +// +// The result of a PurgeDLQMessages execution is sent and received over the wire as this struct. 
+type HistoryService_PurgeDLQMessages_Result struct { + BadRequestError *shared.BadRequestError `json:"badRequestError,omitempty"` + InternalServiceError *shared.InternalServiceError `json:"internalServiceError,omitempty"` + ServiceBusyError *shared.ServiceBusyError `json:"serviceBusyError,omitempty"` + EntityNotExistError *shared.EntityNotExistsError `json:"entityNotExistError,omitempty"` + ShardOwnershipLostError *ShardOwnershipLostError `json:"shardOwnershipLostError,omitempty"` +} + +// ToWire translates a HistoryService_PurgeDLQMessages_Result struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *HistoryService_PurgeDLQMessages_Result) ToWire() (wire.Value, error) { + var ( + fields [5]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.BadRequestError != nil { + w, err = v.BadRequestError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + if v.InternalServiceError != nil { + w, err = v.InternalServiceError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 2, Value: w} + i++ + } + if v.ServiceBusyError != nil { + w, err = v.ServiceBusyError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 3, Value: w} + i++ + } + if v.EntityNotExistError != nil { + w, err = v.EntityNotExistError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 4, Value: w} + i++ + } + if v.ShardOwnershipLostError != nil { + w, err = v.ShardOwnershipLostError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 5, Value: w} + i++ + } + + if i > 1 { + return wire.Value{}, fmt.Errorf("HistoryService_PurgeDLQMessages_Result should have at most one field: got %v fields", i) + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +// FromWire deserializes a HistoryService_PurgeDLQMessages_Result struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a HistoryService_PurgeDLQMessages_Result struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v HistoryService_PurgeDLQMessages_Result +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *HistoryService_PurgeDLQMessages_Result) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 1: + if field.Value.Type() == wire.TStruct { + v.BadRequestError, err = _BadRequestError_Read(field.Value) + if err != nil { + return err + } + + } + case 2: + if field.Value.Type() == wire.TStruct { + v.InternalServiceError, err = _InternalServiceError_Read(field.Value) + if err != nil { + return err + } + + } + case 3: + if field.Value.Type() == wire.TStruct { + v.ServiceBusyError, err = _ServiceBusyError_Read(field.Value) + if err != nil { + return err + } + + } + case 4: + if field.Value.Type() == wire.TStruct { + v.EntityNotExistError, err = _EntityNotExistsError_Read(field.Value) + if err != nil { + return err + } + + } + case 5: + if field.Value.Type() == wire.TStruct { + v.ShardOwnershipLostError, err = _ShardOwnershipLostError_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + count := 0 + if v.BadRequestError != nil { + count++ + } + if v.InternalServiceError != nil { + count++ + } + if v.ServiceBusyError != nil { + count++ + } + if v.EntityNotExistError != nil { + count++ + } + if v.ShardOwnershipLostError != nil { + count++ + } + if count > 1 { + return fmt.Errorf("HistoryService_PurgeDLQMessages_Result should have at most one field: got %v fields", count) + } + + return nil +} + +// String returns a readable string representation of a HistoryService_PurgeDLQMessages_Result +// struct. +func (v *HistoryService_PurgeDLQMessages_Result) String() string { + if v == nil { + return "" + } + + var fields [5]string + i := 0 + if v.BadRequestError != nil { + fields[i] = fmt.Sprintf("BadRequestError: %v", v.BadRequestError) + i++ + } + if v.InternalServiceError != nil { + fields[i] = fmt.Sprintf("InternalServiceError: %v", v.InternalServiceError) + i++ + } + if v.ServiceBusyError != nil { + fields[i] = fmt.Sprintf("ServiceBusyError: %v", v.ServiceBusyError) + i++ + } + if v.EntityNotExistError != nil { + fields[i] = fmt.Sprintf("EntityNotExistError: %v", v.EntityNotExistError) + i++ + } + if v.ShardOwnershipLostError != nil { + fields[i] = fmt.Sprintf("ShardOwnershipLostError: %v", v.ShardOwnershipLostError) + i++ + } + + return fmt.Sprintf("HistoryService_PurgeDLQMessages_Result{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this HistoryService_PurgeDLQMessages_Result match the +// provided HistoryService_PurgeDLQMessages_Result. +// +// This function performs a deep comparison. 
+func (v *HistoryService_PurgeDLQMessages_Result) Equals(rhs *HistoryService_PurgeDLQMessages_Result) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.BadRequestError == nil && rhs.BadRequestError == nil) || (v.BadRequestError != nil && rhs.BadRequestError != nil && v.BadRequestError.Equals(rhs.BadRequestError))) { + return false + } + if !((v.InternalServiceError == nil && rhs.InternalServiceError == nil) || (v.InternalServiceError != nil && rhs.InternalServiceError != nil && v.InternalServiceError.Equals(rhs.InternalServiceError))) { + return false + } + if !((v.ServiceBusyError == nil && rhs.ServiceBusyError == nil) || (v.ServiceBusyError != nil && rhs.ServiceBusyError != nil && v.ServiceBusyError.Equals(rhs.ServiceBusyError))) { + return false + } + if !((v.EntityNotExistError == nil && rhs.EntityNotExistError == nil) || (v.EntityNotExistError != nil && rhs.EntityNotExistError != nil && v.EntityNotExistError.Equals(rhs.EntityNotExistError))) { + return false + } + if !((v.ShardOwnershipLostError == nil && rhs.ShardOwnershipLostError == nil) || (v.ShardOwnershipLostError != nil && rhs.ShardOwnershipLostError != nil && v.ShardOwnershipLostError.Equals(rhs.ShardOwnershipLostError))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of HistoryService_PurgeDLQMessages_Result. +func (v *HistoryService_PurgeDLQMessages_Result) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.BadRequestError != nil { + err = multierr.Append(err, enc.AddObject("badRequestError", v.BadRequestError)) + } + if v.InternalServiceError != nil { + err = multierr.Append(err, enc.AddObject("internalServiceError", v.InternalServiceError)) + } + if v.ServiceBusyError != nil { + err = multierr.Append(err, enc.AddObject("serviceBusyError", v.ServiceBusyError)) + } + if v.EntityNotExistError != nil { + err = multierr.Append(err, enc.AddObject("entityNotExistError", v.EntityNotExistError)) + } + if v.ShardOwnershipLostError != nil { + err = multierr.Append(err, enc.AddObject("shardOwnershipLostError", v.ShardOwnershipLostError)) + } + return err +} + +// GetBadRequestError returns the value of BadRequestError if it is set or its +// zero value if it is unset. +func (v *HistoryService_PurgeDLQMessages_Result) GetBadRequestError() (o *shared.BadRequestError) { + if v != nil && v.BadRequestError != nil { + return v.BadRequestError + } + + return +} + +// IsSetBadRequestError returns true if BadRequestError is not nil. +func (v *HistoryService_PurgeDLQMessages_Result) IsSetBadRequestError() bool { + return v != nil && v.BadRequestError != nil +} + +// GetInternalServiceError returns the value of InternalServiceError if it is set or its +// zero value if it is unset. +func (v *HistoryService_PurgeDLQMessages_Result) GetInternalServiceError() (o *shared.InternalServiceError) { + if v != nil && v.InternalServiceError != nil { + return v.InternalServiceError + } + + return +} + +// IsSetInternalServiceError returns true if InternalServiceError is not nil. +func (v *HistoryService_PurgeDLQMessages_Result) IsSetInternalServiceError() bool { + return v != nil && v.InternalServiceError != nil +} + +// GetServiceBusyError returns the value of ServiceBusyError if it is set or its +// zero value if it is unset. 
+func (v *HistoryService_PurgeDLQMessages_Result) GetServiceBusyError() (o *shared.ServiceBusyError) { + if v != nil && v.ServiceBusyError != nil { + return v.ServiceBusyError + } + + return +} + +// IsSetServiceBusyError returns true if ServiceBusyError is not nil. +func (v *HistoryService_PurgeDLQMessages_Result) IsSetServiceBusyError() bool { + return v != nil && v.ServiceBusyError != nil +} + +// GetEntityNotExistError returns the value of EntityNotExistError if it is set or its +// zero value if it is unset. +func (v *HistoryService_PurgeDLQMessages_Result) GetEntityNotExistError() (o *shared.EntityNotExistsError) { + if v != nil && v.EntityNotExistError != nil { + return v.EntityNotExistError + } + + return +} + +// IsSetEntityNotExistError returns true if EntityNotExistError is not nil. +func (v *HistoryService_PurgeDLQMessages_Result) IsSetEntityNotExistError() bool { + return v != nil && v.EntityNotExistError != nil +} + +// GetShardOwnershipLostError returns the value of ShardOwnershipLostError if it is set or its +// zero value if it is unset. +func (v *HistoryService_PurgeDLQMessages_Result) GetShardOwnershipLostError() (o *ShardOwnershipLostError) { + if v != nil && v.ShardOwnershipLostError != nil { + return v.ShardOwnershipLostError + } + + return +} + +// IsSetShardOwnershipLostError returns true if ShardOwnershipLostError is not nil. +func (v *HistoryService_PurgeDLQMessages_Result) IsSetShardOwnershipLostError() bool { + return v != nil && v.ShardOwnershipLostError != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the result. +// +// This will always be "PurgeDLQMessages" for this struct. +func (v *HistoryService_PurgeDLQMessages_Result) MethodName() string { + return "PurgeDLQMessages" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Reply for this struct. +func (v *HistoryService_PurgeDLQMessages_Result) EnvelopeType() wire.EnvelopeType { + return wire.Reply +} + +// HistoryService_QueryWorkflow_Args represents the arguments for the HistoryService.QueryWorkflow function. +// +// The arguments for QueryWorkflow are sent and received over the wire as this struct. +type HistoryService_QueryWorkflow_Args struct { + QueryRequest *QueryWorkflowRequest `json:"queryRequest,omitempty"` +} + +// ToWire translates a HistoryService_QueryWorkflow_Args struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. +// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *HistoryService_QueryWorkflow_Args) ToWire() (wire.Value, error) { + var ( + fields [1]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.QueryRequest != nil { + w, err = v.QueryRequest.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _QueryWorkflowRequest_1_Read(w wire.Value) (*QueryWorkflowRequest, error) { + var v QueryWorkflowRequest + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a HistoryService_QueryWorkflow_Args struct from its Thrift-level +// representation. 
The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a HistoryService_QueryWorkflow_Args struct +// from the provided intermediate representation. +// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v HistoryService_QueryWorkflow_Args +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *HistoryService_QueryWorkflow_Args) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 1: + if field.Value.Type() == wire.TStruct { + v.QueryRequest, err = _QueryWorkflowRequest_1_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + return nil +} + +// String returns a readable string representation of a HistoryService_QueryWorkflow_Args +// struct. +func (v *HistoryService_QueryWorkflow_Args) String() string { + if v == nil { + return "" + } + + var fields [1]string + i := 0 + if v.QueryRequest != nil { + fields[i] = fmt.Sprintf("QueryRequest: %v", v.QueryRequest) + i++ + } + + return fmt.Sprintf("HistoryService_QueryWorkflow_Args{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this HistoryService_QueryWorkflow_Args match the +// provided HistoryService_QueryWorkflow_Args. +// +// This function performs a deep comparison. +func (v *HistoryService_QueryWorkflow_Args) Equals(rhs *HistoryService_QueryWorkflow_Args) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.QueryRequest == nil && rhs.QueryRequest == nil) || (v.QueryRequest != nil && rhs.QueryRequest != nil && v.QueryRequest.Equals(rhs.QueryRequest))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of HistoryService_QueryWorkflow_Args. +func (v *HistoryService_QueryWorkflow_Args) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.QueryRequest != nil { + err = multierr.Append(err, enc.AddObject("queryRequest", v.QueryRequest)) + } + return err +} + +// GetQueryRequest returns the value of QueryRequest if it is set or its +// zero value if it is unset. +func (v *HistoryService_QueryWorkflow_Args) GetQueryRequest() (o *QueryWorkflowRequest) { + if v != nil && v.QueryRequest != nil { + return v.QueryRequest + } + + return +} + +// IsSetQueryRequest returns true if QueryRequest is not nil. +func (v *HistoryService_QueryWorkflow_Args) IsSetQueryRequest() bool { + return v != nil && v.QueryRequest != nil +} + +// MethodName returns the name of the Thrift function as specified in +// the IDL, for which this struct represent the arguments. +// +// This will always be "QueryWorkflow" for this struct. +func (v *HistoryService_QueryWorkflow_Args) MethodName() string { + return "QueryWorkflow" +} + +// EnvelopeType returns the kind of value inside this struct. +// +// This will always be Call for this struct. +func (v *HistoryService_QueryWorkflow_Args) EnvelopeType() wire.EnvelopeType { + return wire.Call +} + +// HistoryService_QueryWorkflow_Helper provides functions that aid in handling the +// parameters and return values of the HistoryService.QueryWorkflow +// function. +var HistoryService_QueryWorkflow_Helper = struct { + // Args accepts the parameters of QueryWorkflow in-order and returns + // the arguments struct for the function. 
+ Args func( + queryRequest *QueryWorkflowRequest, + ) *HistoryService_QueryWorkflow_Args + + // IsException returns true if the given error can be thrown + // by QueryWorkflow. + // + // An error can be thrown by QueryWorkflow only if the + // corresponding exception type was mentioned in the 'throws' + // section for it in the Thrift file. + IsException func(error) bool + + // WrapResponse returns the result struct for QueryWorkflow + // given its return value and error. + // + // This allows mapping values and errors returned by + // QueryWorkflow into a serializable result struct. + // WrapResponse returns a non-nil error if the provided + // error cannot be thrown by QueryWorkflow + // + // value, err := QueryWorkflow(args) + // result, err := HistoryService_QueryWorkflow_Helper.WrapResponse(value, err) + // if err != nil { + // return fmt.Errorf("unexpected error from QueryWorkflow: %v", err) + // } + // serialize(result) + WrapResponse func(*QueryWorkflowResponse, error) (*HistoryService_QueryWorkflow_Result, error) + + // UnwrapResponse takes the result struct for QueryWorkflow + // and returns the value or error returned by it. + // + // The error is non-nil only if QueryWorkflow threw an + // exception. + // + // result := deserialize(bytes) + // value, err := HistoryService_QueryWorkflow_Helper.UnwrapResponse(result) + UnwrapResponse func(*HistoryService_QueryWorkflow_Result) (*QueryWorkflowResponse, error) +}{} + +func init() { + HistoryService_QueryWorkflow_Helper.Args = func( + queryRequest *QueryWorkflowRequest, + ) *HistoryService_QueryWorkflow_Args { + return &HistoryService_QueryWorkflow_Args{ + QueryRequest: queryRequest, + } + } + + HistoryService_QueryWorkflow_Helper.IsException = func(err error) bool { + switch err.(type) { + case *shared.BadRequestError: + return true + case *shared.InternalServiceError: + return true + case *shared.EntityNotExistsError: + return true + case *shared.QueryFailedError: + return true + case *shared.LimitExceededError: + return true + case *shared.ServiceBusyError: + return true + case *shared.ClientVersionNotSupportedError: + return true + default: + return false + } + } + + HistoryService_QueryWorkflow_Helper.WrapResponse = func(success *QueryWorkflowResponse, err error) (*HistoryService_QueryWorkflow_Result, error) { + if err == nil { + return &HistoryService_QueryWorkflow_Result{Success: success}, nil + } + + switch e := err.(type) { + case *shared.BadRequestError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.BadRequestError") + } + return &HistoryService_QueryWorkflow_Result{BadRequestError: e}, nil + case *shared.InternalServiceError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.InternalServiceError") + } + return &HistoryService_QueryWorkflow_Result{InternalServiceError: e}, nil + case *shared.EntityNotExistsError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.EntityNotExistError") + } + return &HistoryService_QueryWorkflow_Result{EntityNotExistError: e}, nil + case *shared.QueryFailedError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.QueryFailedError") + } + return &HistoryService_QueryWorkflow_Result{QueryFailedError: e}, nil + case *shared.LimitExceededError: + 
if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.LimitExceededError") + } + return &HistoryService_QueryWorkflow_Result{LimitExceededError: e}, nil + case *shared.ServiceBusyError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.ServiceBusyError") + } + return &HistoryService_QueryWorkflow_Result{ServiceBusyError: e}, nil + case *shared.ClientVersionNotSupportedError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.ClientVersionNotSupportedError") + } + return &HistoryService_QueryWorkflow_Result{ClientVersionNotSupportedError: e}, nil + } + + return nil, err + } + HistoryService_QueryWorkflow_Helper.UnwrapResponse = func(result *HistoryService_QueryWorkflow_Result) (success *QueryWorkflowResponse, err error) { + if result.BadRequestError != nil { + err = result.BadRequestError + return + } + if result.InternalServiceError != nil { + err = result.InternalServiceError + return + } + if result.EntityNotExistError != nil { + err = result.EntityNotExistError + return + } + if result.QueryFailedError != nil { + err = result.QueryFailedError + return + } + if result.LimitExceededError != nil { + err = result.LimitExceededError + return + } + if result.ServiceBusyError != nil { + err = result.ServiceBusyError + return + } + if result.ClientVersionNotSupportedError != nil { + err = result.ClientVersionNotSupportedError + return + } + + if result.Success != nil { + success = result.Success + return + } + + err = errors.New("expected a non-void result") + return + } + +} + +// HistoryService_QueryWorkflow_Result represents the result of a HistoryService.QueryWorkflow function call. +// +// The result of a QueryWorkflow execution is sent and received over the wire as this struct. +// +// Success is set only if the function did not throw an exception. +type HistoryService_QueryWorkflow_Result struct { + // Value returned by QueryWorkflow after a successful execution. + Success *QueryWorkflowResponse `json:"success,omitempty"` + BadRequestError *shared.BadRequestError `json:"badRequestError,omitempty"` + InternalServiceError *shared.InternalServiceError `json:"internalServiceError,omitempty"` + EntityNotExistError *shared.EntityNotExistsError `json:"entityNotExistError,omitempty"` + QueryFailedError *shared.QueryFailedError `json:"queryFailedError,omitempty"` + LimitExceededError *shared.LimitExceededError `json:"limitExceededError,omitempty"` + ServiceBusyError *shared.ServiceBusyError `json:"serviceBusyError,omitempty"` + ClientVersionNotSupportedError *shared.ClientVersionNotSupportedError `json:"clientVersionNotSupportedError,omitempty"` +} + +// ToWire translates a HistoryService_QueryWorkflow_Result struct into a Thrift-level intermediate +// representation. This intermediate representation may be serialized +// into bytes using a ThriftRW protocol implementation. +// +// An error is returned if the struct or any of its fields failed to +// validate. 
+// +// x, err := v.ToWire() +// if err != nil { +// return err +// } +// +// if err := binaryProtocol.Encode(x, writer); err != nil { +// return err +// } +func (v *HistoryService_QueryWorkflow_Result) ToWire() (wire.Value, error) { + var ( + fields [8]wire.Field + i int = 0 + w wire.Value + err error + ) + + if v.Success != nil { + w, err = v.Success.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 0, Value: w} + i++ + } + if v.BadRequestError != nil { + w, err = v.BadRequestError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 1, Value: w} + i++ + } + if v.InternalServiceError != nil { + w, err = v.InternalServiceError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 2, Value: w} + i++ + } + if v.EntityNotExistError != nil { + w, err = v.EntityNotExistError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 3, Value: w} + i++ + } + if v.QueryFailedError != nil { + w, err = v.QueryFailedError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 4, Value: w} + i++ + } + if v.LimitExceededError != nil { + w, err = v.LimitExceededError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 5, Value: w} + i++ + } + if v.ServiceBusyError != nil { + w, err = v.ServiceBusyError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 6, Value: w} + i++ + } + if v.ClientVersionNotSupportedError != nil { + w, err = v.ClientVersionNotSupportedError.ToWire() + if err != nil { + return w, err + } + fields[i] = wire.Field{ID: 7, Value: w} + i++ + } + + if i != 1 { + return wire.Value{}, fmt.Errorf("HistoryService_QueryWorkflow_Result should have exactly one field: got %v fields", i) + } + + return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil +} + +func _QueryWorkflowResponse_1_Read(w wire.Value) (*QueryWorkflowResponse, error) { + var v QueryWorkflowResponse + err := v.FromWire(w) + return &v, err +} + +func _QueryFailedError_Read(w wire.Value) (*shared.QueryFailedError, error) { + var v shared.QueryFailedError + err := v.FromWire(w) + return &v, err +} + +// FromWire deserializes a HistoryService_QueryWorkflow_Result struct from its Thrift-level +// representation. The Thrift-level representation may be obtained +// from a ThriftRW protocol implementation. +// +// An error is returned if we were unable to build a HistoryService_QueryWorkflow_Result struct +// from the provided intermediate representation. 
+// +// x, err := binaryProtocol.Decode(reader, wire.TStruct) +// if err != nil { +// return nil, err +// } +// +// var v HistoryService_QueryWorkflow_Result +// if err := v.FromWire(x); err != nil { +// return nil, err +// } +// return &v, nil +func (v *HistoryService_QueryWorkflow_Result) FromWire(w wire.Value) error { + var err error + + for _, field := range w.GetStruct().Fields { + switch field.ID { + case 0: + if field.Value.Type() == wire.TStruct { + v.Success, err = _QueryWorkflowResponse_1_Read(field.Value) + if err != nil { + return err + } + + } + case 1: + if field.Value.Type() == wire.TStruct { + v.BadRequestError, err = _BadRequestError_Read(field.Value) + if err != nil { + return err + } + + } + case 2: + if field.Value.Type() == wire.TStruct { + v.InternalServiceError, err = _InternalServiceError_Read(field.Value) + if err != nil { + return err + } + + } + case 3: + if field.Value.Type() == wire.TStruct { + v.EntityNotExistError, err = _EntityNotExistsError_Read(field.Value) + if err != nil { + return err + } + + } + case 4: + if field.Value.Type() == wire.TStruct { + v.QueryFailedError, err = _QueryFailedError_Read(field.Value) + if err != nil { + return err + } + + } + case 5: + if field.Value.Type() == wire.TStruct { + v.LimitExceededError, err = _LimitExceededError_Read(field.Value) + if err != nil { + return err + } + + } + case 6: + if field.Value.Type() == wire.TStruct { + v.ServiceBusyError, err = _ServiceBusyError_Read(field.Value) + if err != nil { + return err + } + + } + case 7: + if field.Value.Type() == wire.TStruct { + v.ClientVersionNotSupportedError, err = _ClientVersionNotSupportedError_Read(field.Value) + if err != nil { + return err + } + + } + } + } + + count := 0 + if v.Success != nil { + count++ + } + if v.BadRequestError != nil { + count++ + } + if v.InternalServiceError != nil { + count++ + } + if v.EntityNotExistError != nil { + count++ + } + if v.QueryFailedError != nil { + count++ + } + if v.LimitExceededError != nil { + count++ + } + if v.ServiceBusyError != nil { + count++ + } + if v.ClientVersionNotSupportedError != nil { + count++ + } + if count != 1 { + return fmt.Errorf("HistoryService_QueryWorkflow_Result should have exactly one field: got %v fields", count) + } + + return nil +} + +// String returns a readable string representation of a HistoryService_QueryWorkflow_Result +// struct. 
+func (v *HistoryService_QueryWorkflow_Result) String() string { + if v == nil { + return "" + } + + var fields [8]string + i := 0 + if v.Success != nil { + fields[i] = fmt.Sprintf("Success: %v", v.Success) + i++ + } + if v.BadRequestError != nil { + fields[i] = fmt.Sprintf("BadRequestError: %v", v.BadRequestError) + i++ + } + if v.InternalServiceError != nil { + fields[i] = fmt.Sprintf("InternalServiceError: %v", v.InternalServiceError) + i++ + } + if v.EntityNotExistError != nil { + fields[i] = fmt.Sprintf("EntityNotExistError: %v", v.EntityNotExistError) + i++ + } + if v.QueryFailedError != nil { + fields[i] = fmt.Sprintf("QueryFailedError: %v", v.QueryFailedError) + i++ + } + if v.LimitExceededError != nil { + fields[i] = fmt.Sprintf("LimitExceededError: %v", v.LimitExceededError) + i++ + } + if v.ServiceBusyError != nil { + fields[i] = fmt.Sprintf("ServiceBusyError: %v", v.ServiceBusyError) + i++ + } + if v.ClientVersionNotSupportedError != nil { + fields[i] = fmt.Sprintf("ClientVersionNotSupportedError: %v", v.ClientVersionNotSupportedError) + i++ + } + + return fmt.Sprintf("HistoryService_QueryWorkflow_Result{%v}", strings.Join(fields[:i], ", ")) +} + +// Equals returns true if all the fields of this HistoryService_QueryWorkflow_Result match the +// provided HistoryService_QueryWorkflow_Result. +// +// This function performs a deep comparison. +func (v *HistoryService_QueryWorkflow_Result) Equals(rhs *HistoryService_QueryWorkflow_Result) bool { + if v == nil { + return rhs == nil + } else if rhs == nil { + return false + } + if !((v.Success == nil && rhs.Success == nil) || (v.Success != nil && rhs.Success != nil && v.Success.Equals(rhs.Success))) { + return false + } + if !((v.BadRequestError == nil && rhs.BadRequestError == nil) || (v.BadRequestError != nil && rhs.BadRequestError != nil && v.BadRequestError.Equals(rhs.BadRequestError))) { + return false + } + if !((v.InternalServiceError == nil && rhs.InternalServiceError == nil) || (v.InternalServiceError != nil && rhs.InternalServiceError != nil && v.InternalServiceError.Equals(rhs.InternalServiceError))) { + return false + } + if !((v.EntityNotExistError == nil && rhs.EntityNotExistError == nil) || (v.EntityNotExistError != nil && rhs.EntityNotExistError != nil && v.EntityNotExistError.Equals(rhs.EntityNotExistError))) { + return false + } + if !((v.QueryFailedError == nil && rhs.QueryFailedError == nil) || (v.QueryFailedError != nil && rhs.QueryFailedError != nil && v.QueryFailedError.Equals(rhs.QueryFailedError))) { + return false + } + if !((v.LimitExceededError == nil && rhs.LimitExceededError == nil) || (v.LimitExceededError != nil && rhs.LimitExceededError != nil && v.LimitExceededError.Equals(rhs.LimitExceededError))) { + return false + } + if !((v.ServiceBusyError == nil && rhs.ServiceBusyError == nil) || (v.ServiceBusyError != nil && rhs.ServiceBusyError != nil && v.ServiceBusyError.Equals(rhs.ServiceBusyError))) { + return false + } + if !((v.ClientVersionNotSupportedError == nil && rhs.ClientVersionNotSupportedError == nil) || (v.ClientVersionNotSupportedError != nil && rhs.ClientVersionNotSupportedError != nil && v.ClientVersionNotSupportedError.Equals(rhs.ClientVersionNotSupportedError))) { + return false + } + + return true +} + +// MarshalLogObject implements zapcore.ObjectMarshaler, enabling +// fast logging of HistoryService_QueryWorkflow_Result. 
+func (v *HistoryService_QueryWorkflow_Result) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { + if v == nil { + return nil + } + if v.Success != nil { + err = multierr.Append(err, enc.AddObject("success", v.Success)) + } + if v.BadRequestError != nil { + err = multierr.Append(err, enc.AddObject("badRequestError", v.BadRequestError)) + } + if v.InternalServiceError != nil { + err = multierr.Append(err, enc.AddObject("internalServiceError", v.InternalServiceError)) + } + if v.EntityNotExistError != nil { err = multierr.Append(err, enc.AddObject("entityNotExistError", v.EntityNotExistError)) } - if v.ShardOwnershipLostError != nil { - err = multierr.Append(err, enc.AddObject("shardOwnershipLostError", v.ShardOwnershipLostError)) + if v.QueryFailedError != nil { + err = multierr.Append(err, enc.AddObject("queryFailedError", v.QueryFailedError)) } if v.LimitExceededError != nil { err = multierr.Append(err, enc.AddObject("limitExceededError", v.LimitExceededError)) @@ -18177,15 +20332,15 @@ func (v *HistoryService_PollMutableState_Result) MarshalLogObject(enc zapcore.Ob if v.ServiceBusyError != nil { err = multierr.Append(err, enc.AddObject("serviceBusyError", v.ServiceBusyError)) } - if v.CurrentBranchChangedError != nil { - err = multierr.Append(err, enc.AddObject("currentBranchChangedError", v.CurrentBranchChangedError)) + if v.ClientVersionNotSupportedError != nil { + err = multierr.Append(err, enc.AddObject("clientVersionNotSupportedError", v.ClientVersionNotSupportedError)) } return err } // GetSuccess returns the value of Success if it is set or its // zero value if it is unset. -func (v *HistoryService_PollMutableState_Result) GetSuccess() (o *PollMutableStateResponse) { +func (v *HistoryService_QueryWorkflow_Result) GetSuccess() (o *QueryWorkflowResponse) { if v != nil && v.Success != nil { return v.Success } @@ -18194,13 +20349,13 @@ func (v *HistoryService_PollMutableState_Result) GetSuccess() (o *PollMutableSta } // IsSetSuccess returns true if Success is not nil. -func (v *HistoryService_PollMutableState_Result) IsSetSuccess() bool { +func (v *HistoryService_QueryWorkflow_Result) IsSetSuccess() bool { return v != nil && v.Success != nil } // GetBadRequestError returns the value of BadRequestError if it is set or its // zero value if it is unset. -func (v *HistoryService_PollMutableState_Result) GetBadRequestError() (o *shared.BadRequestError) { +func (v *HistoryService_QueryWorkflow_Result) GetBadRequestError() (o *shared.BadRequestError) { if v != nil && v.BadRequestError != nil { return v.BadRequestError } @@ -18209,13 +20364,13 @@ func (v *HistoryService_PollMutableState_Result) GetBadRequestError() (o *shared } // IsSetBadRequestError returns true if BadRequestError is not nil. -func (v *HistoryService_PollMutableState_Result) IsSetBadRequestError() bool { +func (v *HistoryService_QueryWorkflow_Result) IsSetBadRequestError() bool { return v != nil && v.BadRequestError != nil } // GetInternalServiceError returns the value of InternalServiceError if it is set or its // zero value if it is unset. 
-func (v *HistoryService_PollMutableState_Result) GetInternalServiceError() (o *shared.InternalServiceError) { +func (v *HistoryService_QueryWorkflow_Result) GetInternalServiceError() (o *shared.InternalServiceError) { if v != nil && v.InternalServiceError != nil { return v.InternalServiceError } @@ -18224,13 +20379,13 @@ func (v *HistoryService_PollMutableState_Result) GetInternalServiceError() (o *s } // IsSetInternalServiceError returns true if InternalServiceError is not nil. -func (v *HistoryService_PollMutableState_Result) IsSetInternalServiceError() bool { +func (v *HistoryService_QueryWorkflow_Result) IsSetInternalServiceError() bool { return v != nil && v.InternalServiceError != nil } // GetEntityNotExistError returns the value of EntityNotExistError if it is set or its // zero value if it is unset. -func (v *HistoryService_PollMutableState_Result) GetEntityNotExistError() (o *shared.EntityNotExistsError) { +func (v *HistoryService_QueryWorkflow_Result) GetEntityNotExistError() (o *shared.EntityNotExistsError) { if v != nil && v.EntityNotExistError != nil { return v.EntityNotExistError } @@ -18239,28 +20394,28 @@ func (v *HistoryService_PollMutableState_Result) GetEntityNotExistError() (o *sh } // IsSetEntityNotExistError returns true if EntityNotExistError is not nil. -func (v *HistoryService_PollMutableState_Result) IsSetEntityNotExistError() bool { +func (v *HistoryService_QueryWorkflow_Result) IsSetEntityNotExistError() bool { return v != nil && v.EntityNotExistError != nil } -// GetShardOwnershipLostError returns the value of ShardOwnershipLostError if it is set or its +// GetQueryFailedError returns the value of QueryFailedError if it is set or its // zero value if it is unset. -func (v *HistoryService_PollMutableState_Result) GetShardOwnershipLostError() (o *ShardOwnershipLostError) { - if v != nil && v.ShardOwnershipLostError != nil { - return v.ShardOwnershipLostError +func (v *HistoryService_QueryWorkflow_Result) GetQueryFailedError() (o *shared.QueryFailedError) { + if v != nil && v.QueryFailedError != nil { + return v.QueryFailedError } return } -// IsSetShardOwnershipLostError returns true if ShardOwnershipLostError is not nil. -func (v *HistoryService_PollMutableState_Result) IsSetShardOwnershipLostError() bool { - return v != nil && v.ShardOwnershipLostError != nil +// IsSetQueryFailedError returns true if QueryFailedError is not nil. +func (v *HistoryService_QueryWorkflow_Result) IsSetQueryFailedError() bool { + return v != nil && v.QueryFailedError != nil } // GetLimitExceededError returns the value of LimitExceededError if it is set or its // zero value if it is unset. -func (v *HistoryService_PollMutableState_Result) GetLimitExceededError() (o *shared.LimitExceededError) { +func (v *HistoryService_QueryWorkflow_Result) GetLimitExceededError() (o *shared.LimitExceededError) { if v != nil && v.LimitExceededError != nil { return v.LimitExceededError } @@ -18269,13 +20424,13 @@ func (v *HistoryService_PollMutableState_Result) GetLimitExceededError() (o *sha } // IsSetLimitExceededError returns true if LimitExceededError is not nil. -func (v *HistoryService_PollMutableState_Result) IsSetLimitExceededError() bool { +func (v *HistoryService_QueryWorkflow_Result) IsSetLimitExceededError() bool { return v != nil && v.LimitExceededError != nil } // GetServiceBusyError returns the value of ServiceBusyError if it is set or its // zero value if it is unset. 
-func (v *HistoryService_PollMutableState_Result) GetServiceBusyError() (o *shared.ServiceBusyError) { +func (v *HistoryService_QueryWorkflow_Result) GetServiceBusyError() (o *shared.ServiceBusyError) { if v != nil && v.ServiceBusyError != nil { return v.ServiceBusyError } @@ -18284,48 +20439,48 @@ func (v *HistoryService_PollMutableState_Result) GetServiceBusyError() (o *share } // IsSetServiceBusyError returns true if ServiceBusyError is not nil. -func (v *HistoryService_PollMutableState_Result) IsSetServiceBusyError() bool { +func (v *HistoryService_QueryWorkflow_Result) IsSetServiceBusyError() bool { return v != nil && v.ServiceBusyError != nil } -// GetCurrentBranchChangedError returns the value of CurrentBranchChangedError if it is set or its +// GetClientVersionNotSupportedError returns the value of ClientVersionNotSupportedError if it is set or its // zero value if it is unset. -func (v *HistoryService_PollMutableState_Result) GetCurrentBranchChangedError() (o *shared.CurrentBranchChangedError) { - if v != nil && v.CurrentBranchChangedError != nil { - return v.CurrentBranchChangedError +func (v *HistoryService_QueryWorkflow_Result) GetClientVersionNotSupportedError() (o *shared.ClientVersionNotSupportedError) { + if v != nil && v.ClientVersionNotSupportedError != nil { + return v.ClientVersionNotSupportedError } return } -// IsSetCurrentBranchChangedError returns true if CurrentBranchChangedError is not nil. -func (v *HistoryService_PollMutableState_Result) IsSetCurrentBranchChangedError() bool { - return v != nil && v.CurrentBranchChangedError != nil +// IsSetClientVersionNotSupportedError returns true if ClientVersionNotSupportedError is not nil. +func (v *HistoryService_QueryWorkflow_Result) IsSetClientVersionNotSupportedError() bool { + return v != nil && v.ClientVersionNotSupportedError != nil } // MethodName returns the name of the Thrift function as specified in // the IDL, for which this struct represent the result. // -// This will always be "PollMutableState" for this struct. -func (v *HistoryService_PollMutableState_Result) MethodName() string { - return "PollMutableState" +// This will always be "QueryWorkflow" for this struct. +func (v *HistoryService_QueryWorkflow_Result) MethodName() string { + return "QueryWorkflow" } // EnvelopeType returns the kind of value inside this struct. // // This will always be Reply for this struct. -func (v *HistoryService_PollMutableState_Result) EnvelopeType() wire.EnvelopeType { +func (v *HistoryService_QueryWorkflow_Result) EnvelopeType() wire.EnvelopeType { return wire.Reply } -// HistoryService_QueryWorkflow_Args represents the arguments for the HistoryService.QueryWorkflow function. +// HistoryService_ReadDLQMessages_Args represents the arguments for the HistoryService.ReadDLQMessages function. // -// The arguments for QueryWorkflow are sent and received over the wire as this struct. -type HistoryService_QueryWorkflow_Args struct { - QueryRequest *QueryWorkflowRequest `json:"queryRequest,omitempty"` +// The arguments for ReadDLQMessages are sent and received over the wire as this struct. +type HistoryService_ReadDLQMessages_Args struct { + Request *replicator.ReadDLQMessagesRequest `json:"request,omitempty"` } -// ToWire translates a HistoryService_QueryWorkflow_Args struct into a Thrift-level intermediate +// ToWire translates a HistoryService_ReadDLQMessages_Args struct into a Thrift-level intermediate // representation. This intermediate representation may be serialized // into bytes using a ThriftRW protocol implementation. 
// @@ -18340,7 +20495,7 @@ type HistoryService_QueryWorkflow_Args struct { // if err := binaryProtocol.Encode(x, writer); err != nil { // return err // } -func (v *HistoryService_QueryWorkflow_Args) ToWire() (wire.Value, error) { +func (v *HistoryService_ReadDLQMessages_Args) ToWire() (wire.Value, error) { var ( fields [1]wire.Field i int = 0 @@ -18348,8 +20503,8 @@ func (v *HistoryService_QueryWorkflow_Args) ToWire() (wire.Value, error) { err error ) - if v.QueryRequest != nil { - w, err = v.QueryRequest.ToWire() + if v.Request != nil { + w, err = v.Request.ToWire() if err != nil { return w, err } @@ -18360,17 +20515,17 @@ func (v *HistoryService_QueryWorkflow_Args) ToWire() (wire.Value, error) { return wire.NewValueStruct(wire.Struct{Fields: fields[:i]}), nil } -func _QueryWorkflowRequest_1_Read(w wire.Value) (*QueryWorkflowRequest, error) { - var v QueryWorkflowRequest +func _ReadDLQMessagesRequest_Read(w wire.Value) (*replicator.ReadDLQMessagesRequest, error) { + var v replicator.ReadDLQMessagesRequest err := v.FromWire(w) return &v, err } -// FromWire deserializes a HistoryService_QueryWorkflow_Args struct from its Thrift-level +// FromWire deserializes a HistoryService_ReadDLQMessages_Args struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. // -// An error is returned if we were unable to build a HistoryService_QueryWorkflow_Args struct +// An error is returned if we were unable to build a HistoryService_ReadDLQMessages_Args struct // from the provided intermediate representation. // // x, err := binaryProtocol.Decode(reader, wire.TStruct) @@ -18378,19 +20533,19 @@ func _QueryWorkflowRequest_1_Read(w wire.Value) (*QueryWorkflowRequest, error) { // return nil, err // } // -// var v HistoryService_QueryWorkflow_Args +// var v HistoryService_ReadDLQMessages_Args // if err := v.FromWire(x); err != nil { // return nil, err // } // return &v, nil -func (v *HistoryService_QueryWorkflow_Args) FromWire(w wire.Value) error { +func (v *HistoryService_ReadDLQMessages_Args) FromWire(w wire.Value) error { var err error for _, field := range w.GetStruct().Fields { switch field.ID { case 1: if field.Value.Type() == wire.TStruct { - v.QueryRequest, err = _QueryWorkflowRequest_1_Read(field.Value) + v.Request, err = _ReadDLQMessagesRequest_Read(field.Value) if err != nil { return err } @@ -18402,34 +20557,34 @@ func (v *HistoryService_QueryWorkflow_Args) FromWire(w wire.Value) error { return nil } -// String returns a readable string representation of a HistoryService_QueryWorkflow_Args +// String returns a readable string representation of a HistoryService_ReadDLQMessages_Args // struct. -func (v *HistoryService_QueryWorkflow_Args) String() string { +func (v *HistoryService_ReadDLQMessages_Args) String() string { if v == nil { return "" } var fields [1]string i := 0 - if v.QueryRequest != nil { - fields[i] = fmt.Sprintf("QueryRequest: %v", v.QueryRequest) + if v.Request != nil { + fields[i] = fmt.Sprintf("Request: %v", v.Request) i++ } - return fmt.Sprintf("HistoryService_QueryWorkflow_Args{%v}", strings.Join(fields[:i], ", ")) + return fmt.Sprintf("HistoryService_ReadDLQMessages_Args{%v}", strings.Join(fields[:i], ", ")) } -// Equals returns true if all the fields of this HistoryService_QueryWorkflow_Args match the -// provided HistoryService_QueryWorkflow_Args. +// Equals returns true if all the fields of this HistoryService_ReadDLQMessages_Args match the +// provided HistoryService_ReadDLQMessages_Args. 
// // This function performs a deep comparison. -func (v *HistoryService_QueryWorkflow_Args) Equals(rhs *HistoryService_QueryWorkflow_Args) bool { +func (v *HistoryService_ReadDLQMessages_Args) Equals(rhs *HistoryService_ReadDLQMessages_Args) bool { if v == nil { return rhs == nil } else if rhs == nil { return false } - if !((v.QueryRequest == nil && rhs.QueryRequest == nil) || (v.QueryRequest != nil && rhs.QueryRequest != nil && v.QueryRequest.Equals(rhs.QueryRequest))) { + if !((v.Request == nil && rhs.Request == nil) || (v.Request != nil && rhs.Request != nil && v.Request.Equals(rhs.Request))) { return false } @@ -18437,168 +20592,154 @@ func (v *HistoryService_QueryWorkflow_Args) Equals(rhs *HistoryService_QueryWork } // MarshalLogObject implements zapcore.ObjectMarshaler, enabling -// fast logging of HistoryService_QueryWorkflow_Args. -func (v *HistoryService_QueryWorkflow_Args) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { +// fast logging of HistoryService_ReadDLQMessages_Args. +func (v *HistoryService_ReadDLQMessages_Args) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { if v == nil { return nil } - if v.QueryRequest != nil { - err = multierr.Append(err, enc.AddObject("queryRequest", v.QueryRequest)) + if v.Request != nil { + err = multierr.Append(err, enc.AddObject("request", v.Request)) } return err } -// GetQueryRequest returns the value of QueryRequest if it is set or its +// GetRequest returns the value of Request if it is set or its // zero value if it is unset. -func (v *HistoryService_QueryWorkflow_Args) GetQueryRequest() (o *QueryWorkflowRequest) { - if v != nil && v.QueryRequest != nil { - return v.QueryRequest +func (v *HistoryService_ReadDLQMessages_Args) GetRequest() (o *replicator.ReadDLQMessagesRequest) { + if v != nil && v.Request != nil { + return v.Request } return } -// IsSetQueryRequest returns true if QueryRequest is not nil. -func (v *HistoryService_QueryWorkflow_Args) IsSetQueryRequest() bool { - return v != nil && v.QueryRequest != nil +// IsSetRequest returns true if Request is not nil. +func (v *HistoryService_ReadDLQMessages_Args) IsSetRequest() bool { + return v != nil && v.Request != nil } // MethodName returns the name of the Thrift function as specified in // the IDL, for which this struct represent the arguments. // -// This will always be "QueryWorkflow" for this struct. -func (v *HistoryService_QueryWorkflow_Args) MethodName() string { - return "QueryWorkflow" +// This will always be "ReadDLQMessages" for this struct. +func (v *HistoryService_ReadDLQMessages_Args) MethodName() string { + return "ReadDLQMessages" } // EnvelopeType returns the kind of value inside this struct. // // This will always be Call for this struct. -func (v *HistoryService_QueryWorkflow_Args) EnvelopeType() wire.EnvelopeType { +func (v *HistoryService_ReadDLQMessages_Args) EnvelopeType() wire.EnvelopeType { return wire.Call } -// HistoryService_QueryWorkflow_Helper provides functions that aid in handling the -// parameters and return values of the HistoryService.QueryWorkflow +// HistoryService_ReadDLQMessages_Helper provides functions that aid in handling the +// parameters and return values of the HistoryService.ReadDLQMessages // function. -var HistoryService_QueryWorkflow_Helper = struct { - // Args accepts the parameters of QueryWorkflow in-order and returns +var HistoryService_ReadDLQMessages_Helper = struct { + // Args accepts the parameters of ReadDLQMessages in-order and returns // the arguments struct for the function. 
Args func( - queryRequest *QueryWorkflowRequest, - ) *HistoryService_QueryWorkflow_Args + request *replicator.ReadDLQMessagesRequest, + ) *HistoryService_ReadDLQMessages_Args // IsException returns true if the given error can be thrown - // by QueryWorkflow. + // by ReadDLQMessages. // - // An error can be thrown by QueryWorkflow only if the + // An error can be thrown by ReadDLQMessages only if the // corresponding exception type was mentioned in the 'throws' // section for it in the Thrift file. IsException func(error) bool - // WrapResponse returns the result struct for QueryWorkflow + // WrapResponse returns the result struct for ReadDLQMessages // given its return value and error. // // This allows mapping values and errors returned by - // QueryWorkflow into a serializable result struct. + // ReadDLQMessages into a serializable result struct. // WrapResponse returns a non-nil error if the provided - // error cannot be thrown by QueryWorkflow + // error cannot be thrown by ReadDLQMessages // - // value, err := QueryWorkflow(args) - // result, err := HistoryService_QueryWorkflow_Helper.WrapResponse(value, err) + // value, err := ReadDLQMessages(args) + // result, err := HistoryService_ReadDLQMessages_Helper.WrapResponse(value, err) // if err != nil { - // return fmt.Errorf("unexpected error from QueryWorkflow: %v", err) + // return fmt.Errorf("unexpected error from ReadDLQMessages: %v", err) // } // serialize(result) - WrapResponse func(*QueryWorkflowResponse, error) (*HistoryService_QueryWorkflow_Result, error) + WrapResponse func(*replicator.ReadDLQMessagesResponse, error) (*HistoryService_ReadDLQMessages_Result, error) - // UnwrapResponse takes the result struct for QueryWorkflow + // UnwrapResponse takes the result struct for ReadDLQMessages // and returns the value or error returned by it. // - // The error is non-nil only if QueryWorkflow threw an + // The error is non-nil only if ReadDLQMessages threw an // exception. 
// // result := deserialize(bytes) - // value, err := HistoryService_QueryWorkflow_Helper.UnwrapResponse(result) - UnwrapResponse func(*HistoryService_QueryWorkflow_Result) (*QueryWorkflowResponse, error) + // value, err := HistoryService_ReadDLQMessages_Helper.UnwrapResponse(result) + UnwrapResponse func(*HistoryService_ReadDLQMessages_Result) (*replicator.ReadDLQMessagesResponse, error) }{} func init() { - HistoryService_QueryWorkflow_Helper.Args = func( - queryRequest *QueryWorkflowRequest, - ) *HistoryService_QueryWorkflow_Args { - return &HistoryService_QueryWorkflow_Args{ - QueryRequest: queryRequest, + HistoryService_ReadDLQMessages_Helper.Args = func( + request *replicator.ReadDLQMessagesRequest, + ) *HistoryService_ReadDLQMessages_Args { + return &HistoryService_ReadDLQMessages_Args{ + Request: request, } } - HistoryService_QueryWorkflow_Helper.IsException = func(err error) bool { + HistoryService_ReadDLQMessages_Helper.IsException = func(err error) bool { switch err.(type) { case *shared.BadRequestError: return true case *shared.InternalServiceError: return true - case *shared.EntityNotExistsError: - return true - case *shared.QueryFailedError: - return true - case *shared.LimitExceededError: - return true case *shared.ServiceBusyError: return true - case *shared.ClientVersionNotSupportedError: + case *shared.EntityNotExistsError: + return true + case *ShardOwnershipLostError: return true default: return false } } - HistoryService_QueryWorkflow_Helper.WrapResponse = func(success *QueryWorkflowResponse, err error) (*HistoryService_QueryWorkflow_Result, error) { + HistoryService_ReadDLQMessages_Helper.WrapResponse = func(success *replicator.ReadDLQMessagesResponse, err error) (*HistoryService_ReadDLQMessages_Result, error) { if err == nil { - return &HistoryService_QueryWorkflow_Result{Success: success}, nil + return &HistoryService_ReadDLQMessages_Result{Success: success}, nil } switch e := err.(type) { case *shared.BadRequestError: if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.BadRequestError") + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_ReadDLQMessages_Result.BadRequestError") } - return &HistoryService_QueryWorkflow_Result{BadRequestError: e}, nil + return &HistoryService_ReadDLQMessages_Result{BadRequestError: e}, nil case *shared.InternalServiceError: if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.InternalServiceError") - } - return &HistoryService_QueryWorkflow_Result{InternalServiceError: e}, nil - case *shared.EntityNotExistsError: - if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.EntityNotExistError") - } - return &HistoryService_QueryWorkflow_Result{EntityNotExistError: e}, nil - case *shared.QueryFailedError: - if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.QueryFailedError") - } - return &HistoryService_QueryWorkflow_Result{QueryFailedError: e}, nil - case *shared.LimitExceededError: - if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.LimitExceededError") + return nil, errors.New("WrapResponse received non-nil error type with nil value for 
HistoryService_ReadDLQMessages_Result.InternalServiceError") } - return &HistoryService_QueryWorkflow_Result{LimitExceededError: e}, nil + return &HistoryService_ReadDLQMessages_Result{InternalServiceError: e}, nil case *shared.ServiceBusyError: if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.ServiceBusyError") + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_ReadDLQMessages_Result.ServiceBusyError") + } + return &HistoryService_ReadDLQMessages_Result{ServiceBusyError: e}, nil + case *shared.EntityNotExistsError: + if e == nil { + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_ReadDLQMessages_Result.EntityNotExistError") } - return &HistoryService_QueryWorkflow_Result{ServiceBusyError: e}, nil - case *shared.ClientVersionNotSupportedError: + return &HistoryService_ReadDLQMessages_Result{EntityNotExistError: e}, nil + case *ShardOwnershipLostError: if e == nil { - return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_QueryWorkflow_Result.ClientVersionNotSupportedError") + return nil, errors.New("WrapResponse received non-nil error type with nil value for HistoryService_ReadDLQMessages_Result.ShardOwnershipLostError") } - return &HistoryService_QueryWorkflow_Result{ClientVersionNotSupportedError: e}, nil + return &HistoryService_ReadDLQMessages_Result{ShardOwnershipLostError: e}, nil } return nil, err } - HistoryService_QueryWorkflow_Helper.UnwrapResponse = func(result *HistoryService_QueryWorkflow_Result) (success *QueryWorkflowResponse, err error) { + HistoryService_ReadDLQMessages_Helper.UnwrapResponse = func(result *HistoryService_ReadDLQMessages_Result) (success *replicator.ReadDLQMessagesResponse, err error) { if result.BadRequestError != nil { err = result.BadRequestError return @@ -18607,24 +20748,16 @@ func init() { err = result.InternalServiceError return } - if result.EntityNotExistError != nil { - err = result.EntityNotExistError - return - } - if result.QueryFailedError != nil { - err = result.QueryFailedError - return - } - if result.LimitExceededError != nil { - err = result.LimitExceededError - return - } if result.ServiceBusyError != nil { err = result.ServiceBusyError return } - if result.ClientVersionNotSupportedError != nil { - err = result.ClientVersionNotSupportedError + if result.EntityNotExistError != nil { + err = result.EntityNotExistError + return + } + if result.ShardOwnershipLostError != nil { + err = result.ShardOwnershipLostError return } @@ -18639,24 +20772,22 @@ func init() { } -// HistoryService_QueryWorkflow_Result represents the result of a HistoryService.QueryWorkflow function call. +// HistoryService_ReadDLQMessages_Result represents the result of a HistoryService.ReadDLQMessages function call. // -// The result of a QueryWorkflow execution is sent and received over the wire as this struct. +// The result of a ReadDLQMessages execution is sent and received over the wire as this struct. // // Success is set only if the function did not throw an exception. -type HistoryService_QueryWorkflow_Result struct { - // Value returned by QueryWorkflow after a successful execution. 
- Success *QueryWorkflowResponse `json:"success,omitempty"` - BadRequestError *shared.BadRequestError `json:"badRequestError,omitempty"` - InternalServiceError *shared.InternalServiceError `json:"internalServiceError,omitempty"` - EntityNotExistError *shared.EntityNotExistsError `json:"entityNotExistError,omitempty"` - QueryFailedError *shared.QueryFailedError `json:"queryFailedError,omitempty"` - LimitExceededError *shared.LimitExceededError `json:"limitExceededError,omitempty"` - ServiceBusyError *shared.ServiceBusyError `json:"serviceBusyError,omitempty"` - ClientVersionNotSupportedError *shared.ClientVersionNotSupportedError `json:"clientVersionNotSupportedError,omitempty"` +type HistoryService_ReadDLQMessages_Result struct { + // Value returned by ReadDLQMessages after a successful execution. + Success *replicator.ReadDLQMessagesResponse `json:"success,omitempty"` + BadRequestError *shared.BadRequestError `json:"badRequestError,omitempty"` + InternalServiceError *shared.InternalServiceError `json:"internalServiceError,omitempty"` + ServiceBusyError *shared.ServiceBusyError `json:"serviceBusyError,omitempty"` + EntityNotExistError *shared.EntityNotExistsError `json:"entityNotExistError,omitempty"` + ShardOwnershipLostError *ShardOwnershipLostError `json:"shardOwnershipLostError,omitempty"` } -// ToWire translates a HistoryService_QueryWorkflow_Result struct into a Thrift-level intermediate +// ToWire translates a HistoryService_ReadDLQMessages_Result struct into a Thrift-level intermediate // representation. This intermediate representation may be serialized // into bytes using a ThriftRW protocol implementation. // @@ -18671,9 +20802,9 @@ type HistoryService_QueryWorkflow_Result struct { // if err := binaryProtocol.Encode(x, writer); err != nil { // return err // } -func (v *HistoryService_QueryWorkflow_Result) ToWire() (wire.Value, error) { +func (v *HistoryService_ReadDLQMessages_Result) ToWire() (wire.Value, error) { var ( - fields [8]wire.Field + fields [6]wire.Field i int = 0 w wire.Value err error @@ -18703,71 +20834,49 @@ func (v *HistoryService_QueryWorkflow_Result) ToWire() (wire.Value, error) { fields[i] = wire.Field{ID: 2, Value: w} i++ } - if v.EntityNotExistError != nil { - w, err = v.EntityNotExistError.ToWire() + if v.ServiceBusyError != nil { + w, err = v.ServiceBusyError.ToWire() if err != nil { return w, err } fields[i] = wire.Field{ID: 3, Value: w} i++ } - if v.QueryFailedError != nil { - w, err = v.QueryFailedError.ToWire() + if v.EntityNotExistError != nil { + w, err = v.EntityNotExistError.ToWire() if err != nil { return w, err } fields[i] = wire.Field{ID: 4, Value: w} i++ } - if v.LimitExceededError != nil { - w, err = v.LimitExceededError.ToWire() + if v.ShardOwnershipLostError != nil { + w, err = v.ShardOwnershipLostError.ToWire() if err != nil { return w, err } fields[i] = wire.Field{ID: 5, Value: w} i++ } - if v.ServiceBusyError != nil { - w, err = v.ServiceBusyError.ToWire() - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 6, Value: w} - i++ - } - if v.ClientVersionNotSupportedError != nil { - w, err = v.ClientVersionNotSupportedError.ToWire() - if err != nil { - return w, err - } - fields[i] = wire.Field{ID: 7, Value: w} - i++ - } if i != 1 { - return wire.Value{}, fmt.Errorf("HistoryService_QueryWorkflow_Result should have exactly one field: got %v fields", i) + return wire.Value{}, fmt.Errorf("HistoryService_ReadDLQMessages_Result should have exactly one field: got %v fields", i) } return wire.NewValueStruct(wire.Struct{Fields: 
fields[:i]}), nil } -func _QueryWorkflowResponse_1_Read(w wire.Value) (*QueryWorkflowResponse, error) { - var v QueryWorkflowResponse - err := v.FromWire(w) - return &v, err -} - -func _QueryFailedError_Read(w wire.Value) (*shared.QueryFailedError, error) { - var v shared.QueryFailedError +func _ReadDLQMessagesResponse_Read(w wire.Value) (*replicator.ReadDLQMessagesResponse, error) { + var v replicator.ReadDLQMessagesResponse err := v.FromWire(w) return &v, err } -// FromWire deserializes a HistoryService_QueryWorkflow_Result struct from its Thrift-level +// FromWire deserializes a HistoryService_ReadDLQMessages_Result struct from its Thrift-level // representation. The Thrift-level representation may be obtained // from a ThriftRW protocol implementation. // -// An error is returned if we were unable to build a HistoryService_QueryWorkflow_Result struct +// An error is returned if we were unable to build a HistoryService_ReadDLQMessages_Result struct // from the provided intermediate representation. // // x, err := binaryProtocol.Decode(reader, wire.TStruct) @@ -18775,19 +20884,19 @@ func _QueryFailedError_Read(w wire.Value) (*shared.QueryFailedError, error) { // return nil, err // } // -// var v HistoryService_QueryWorkflow_Result +// var v HistoryService_ReadDLQMessages_Result // if err := v.FromWire(x); err != nil { // return nil, err // } // return &v, nil -func (v *HistoryService_QueryWorkflow_Result) FromWire(w wire.Value) error { +func (v *HistoryService_ReadDLQMessages_Result) FromWire(w wire.Value) error { var err error for _, field := range w.GetStruct().Fields { switch field.ID { case 0: if field.Value.Type() == wire.TStruct { - v.Success, err = _QueryWorkflowResponse_1_Read(field.Value) + v.Success, err = _ReadDLQMessagesResponse_Read(field.Value) if err != nil { return err } @@ -18811,7 +20920,7 @@ func (v *HistoryService_QueryWorkflow_Result) FromWire(w wire.Value) error { } case 3: if field.Value.Type() == wire.TStruct { - v.EntityNotExistError, err = _EntityNotExistsError_Read(field.Value) + v.ServiceBusyError, err = _ServiceBusyError_Read(field.Value) if err != nil { return err } @@ -18819,7 +20928,7 @@ func (v *HistoryService_QueryWorkflow_Result) FromWire(w wire.Value) error { } case 4: if field.Value.Type() == wire.TStruct { - v.QueryFailedError, err = _QueryFailedError_Read(field.Value) + v.EntityNotExistError, err = _EntityNotExistsError_Read(field.Value) if err != nil { return err } @@ -18827,23 +20936,7 @@ func (v *HistoryService_QueryWorkflow_Result) FromWire(w wire.Value) error { } case 5: if field.Value.Type() == wire.TStruct { - v.LimitExceededError, err = _LimitExceededError_Read(field.Value) - if err != nil { - return err - } - - } - case 6: - if field.Value.Type() == wire.TStruct { - v.ServiceBusyError, err = _ServiceBusyError_Read(field.Value) - if err != nil { - return err - } - - } - case 7: - if field.Value.Type() == wire.TStruct { - v.ClientVersionNotSupportedError, err = _ClientVersionNotSupportedError_Read(field.Value) + v.ShardOwnershipLostError, err = _ShardOwnershipLostError_Read(field.Value) if err != nil { return err } @@ -18862,36 +20955,30 @@ func (v *HistoryService_QueryWorkflow_Result) FromWire(w wire.Value) error { if v.InternalServiceError != nil { count++ } - if v.EntityNotExistError != nil { - count++ - } - if v.QueryFailedError != nil { - count++ - } - if v.LimitExceededError != nil { + if v.ServiceBusyError != nil { count++ } - if v.ServiceBusyError != nil { + if v.EntityNotExistError != nil { count++ } - if 
v.ClientVersionNotSupportedError != nil { + if v.ShardOwnershipLostError != nil { count++ } if count != 1 { - return fmt.Errorf("HistoryService_QueryWorkflow_Result should have exactly one field: got %v fields", count) + return fmt.Errorf("HistoryService_ReadDLQMessages_Result should have exactly one field: got %v fields", count) } return nil } -// String returns a readable string representation of a HistoryService_QueryWorkflow_Result +// String returns a readable string representation of a HistoryService_ReadDLQMessages_Result // struct. -func (v *HistoryService_QueryWorkflow_Result) String() string { +func (v *HistoryService_ReadDLQMessages_Result) String() string { if v == nil { return "" } - var fields [8]string + var fields [6]string i := 0 if v.Success != nil { fields[i] = fmt.Sprintf("Success: %v", v.Success) @@ -18905,35 +20992,27 @@ func (v *HistoryService_QueryWorkflow_Result) String() string { fields[i] = fmt.Sprintf("InternalServiceError: %v", v.InternalServiceError) i++ } - if v.EntityNotExistError != nil { - fields[i] = fmt.Sprintf("EntityNotExistError: %v", v.EntityNotExistError) - i++ - } - if v.QueryFailedError != nil { - fields[i] = fmt.Sprintf("QueryFailedError: %v", v.QueryFailedError) - i++ - } - if v.LimitExceededError != nil { - fields[i] = fmt.Sprintf("LimitExceededError: %v", v.LimitExceededError) - i++ - } if v.ServiceBusyError != nil { fields[i] = fmt.Sprintf("ServiceBusyError: %v", v.ServiceBusyError) i++ } - if v.ClientVersionNotSupportedError != nil { - fields[i] = fmt.Sprintf("ClientVersionNotSupportedError: %v", v.ClientVersionNotSupportedError) + if v.EntityNotExistError != nil { + fields[i] = fmt.Sprintf("EntityNotExistError: %v", v.EntityNotExistError) + i++ + } + if v.ShardOwnershipLostError != nil { + fields[i] = fmt.Sprintf("ShardOwnershipLostError: %v", v.ShardOwnershipLostError) i++ } - return fmt.Sprintf("HistoryService_QueryWorkflow_Result{%v}", strings.Join(fields[:i], ", ")) + return fmt.Sprintf("HistoryService_ReadDLQMessages_Result{%v}", strings.Join(fields[:i], ", ")) } -// Equals returns true if all the fields of this HistoryService_QueryWorkflow_Result match the -// provided HistoryService_QueryWorkflow_Result. +// Equals returns true if all the fields of this HistoryService_ReadDLQMessages_Result match the +// provided HistoryService_ReadDLQMessages_Result. // // This function performs a deep comparison. 
-func (v *HistoryService_QueryWorkflow_Result) Equals(rhs *HistoryService_QueryWorkflow_Result) bool { +func (v *HistoryService_ReadDLQMessages_Result) Equals(rhs *HistoryService_ReadDLQMessages_Result) bool { if v == nil { return rhs == nil } else if rhs == nil { @@ -18948,19 +21027,13 @@ func (v *HistoryService_QueryWorkflow_Result) Equals(rhs *HistoryService_QueryWo if !((v.InternalServiceError == nil && rhs.InternalServiceError == nil) || (v.InternalServiceError != nil && rhs.InternalServiceError != nil && v.InternalServiceError.Equals(rhs.InternalServiceError))) { return false } - if !((v.EntityNotExistError == nil && rhs.EntityNotExistError == nil) || (v.EntityNotExistError != nil && rhs.EntityNotExistError != nil && v.EntityNotExistError.Equals(rhs.EntityNotExistError))) { - return false - } - if !((v.QueryFailedError == nil && rhs.QueryFailedError == nil) || (v.QueryFailedError != nil && rhs.QueryFailedError != nil && v.QueryFailedError.Equals(rhs.QueryFailedError))) { - return false - } - if !((v.LimitExceededError == nil && rhs.LimitExceededError == nil) || (v.LimitExceededError != nil && rhs.LimitExceededError != nil && v.LimitExceededError.Equals(rhs.LimitExceededError))) { + if !((v.ServiceBusyError == nil && rhs.ServiceBusyError == nil) || (v.ServiceBusyError != nil && rhs.ServiceBusyError != nil && v.ServiceBusyError.Equals(rhs.ServiceBusyError))) { return false } - if !((v.ServiceBusyError == nil && rhs.ServiceBusyError == nil) || (v.ServiceBusyError != nil && rhs.ServiceBusyError != nil && v.ServiceBusyError.Equals(rhs.ServiceBusyError))) { + if !((v.EntityNotExistError == nil && rhs.EntityNotExistError == nil) || (v.EntityNotExistError != nil && rhs.EntityNotExistError != nil && v.EntityNotExistError.Equals(rhs.EntityNotExistError))) { return false } - if !((v.ClientVersionNotSupportedError == nil && rhs.ClientVersionNotSupportedError == nil) || (v.ClientVersionNotSupportedError != nil && rhs.ClientVersionNotSupportedError != nil && v.ClientVersionNotSupportedError.Equals(rhs.ClientVersionNotSupportedError))) { + if !((v.ShardOwnershipLostError == nil && rhs.ShardOwnershipLostError == nil) || (v.ShardOwnershipLostError != nil && rhs.ShardOwnershipLostError != nil && v.ShardOwnershipLostError.Equals(rhs.ShardOwnershipLostError))) { return false } @@ -18968,8 +21041,8 @@ func (v *HistoryService_QueryWorkflow_Result) Equals(rhs *HistoryService_QueryWo } // MarshalLogObject implements zapcore.ObjectMarshaler, enabling -// fast logging of HistoryService_QueryWorkflow_Result. -func (v *HistoryService_QueryWorkflow_Result) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { +// fast logging of HistoryService_ReadDLQMessages_Result. 
+func (v *HistoryService_ReadDLQMessages_Result) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) { if v == nil { return nil } @@ -18982,27 +21055,21 @@ func (v *HistoryService_QueryWorkflow_Result) MarshalLogObject(enc zapcore.Objec if v.InternalServiceError != nil { err = multierr.Append(err, enc.AddObject("internalServiceError", v.InternalServiceError)) } - if v.EntityNotExistError != nil { - err = multierr.Append(err, enc.AddObject("entityNotExistError", v.EntityNotExistError)) - } - if v.QueryFailedError != nil { - err = multierr.Append(err, enc.AddObject("queryFailedError", v.QueryFailedError)) - } - if v.LimitExceededError != nil { - err = multierr.Append(err, enc.AddObject("limitExceededError", v.LimitExceededError)) - } if v.ServiceBusyError != nil { err = multierr.Append(err, enc.AddObject("serviceBusyError", v.ServiceBusyError)) } - if v.ClientVersionNotSupportedError != nil { - err = multierr.Append(err, enc.AddObject("clientVersionNotSupportedError", v.ClientVersionNotSupportedError)) + if v.EntityNotExistError != nil { + err = multierr.Append(err, enc.AddObject("entityNotExistError", v.EntityNotExistError)) + } + if v.ShardOwnershipLostError != nil { + err = multierr.Append(err, enc.AddObject("shardOwnershipLostError", v.ShardOwnershipLostError)) } return err } // GetSuccess returns the value of Success if it is set or its // zero value if it is unset. -func (v *HistoryService_QueryWorkflow_Result) GetSuccess() (o *QueryWorkflowResponse) { +func (v *HistoryService_ReadDLQMessages_Result) GetSuccess() (o *replicator.ReadDLQMessagesResponse) { if v != nil && v.Success != nil { return v.Success } @@ -19011,13 +21078,13 @@ func (v *HistoryService_QueryWorkflow_Result) GetSuccess() (o *QueryWorkflowResp } // IsSetSuccess returns true if Success is not nil. -func (v *HistoryService_QueryWorkflow_Result) IsSetSuccess() bool { +func (v *HistoryService_ReadDLQMessages_Result) IsSetSuccess() bool { return v != nil && v.Success != nil } // GetBadRequestError returns the value of BadRequestError if it is set or its // zero value if it is unset. -func (v *HistoryService_QueryWorkflow_Result) GetBadRequestError() (o *shared.BadRequestError) { +func (v *HistoryService_ReadDLQMessages_Result) GetBadRequestError() (o *shared.BadRequestError) { if v != nil && v.BadRequestError != nil { return v.BadRequestError } @@ -19026,13 +21093,13 @@ func (v *HistoryService_QueryWorkflow_Result) GetBadRequestError() (o *shared.Ba } // IsSetBadRequestError returns true if BadRequestError is not nil. -func (v *HistoryService_QueryWorkflow_Result) IsSetBadRequestError() bool { +func (v *HistoryService_ReadDLQMessages_Result) IsSetBadRequestError() bool { return v != nil && v.BadRequestError != nil } // GetInternalServiceError returns the value of InternalServiceError if it is set or its // zero value if it is unset. -func (v *HistoryService_QueryWorkflow_Result) GetInternalServiceError() (o *shared.InternalServiceError) { +func (v *HistoryService_ReadDLQMessages_Result) GetInternalServiceError() (o *shared.InternalServiceError) { if v != nil && v.InternalServiceError != nil { return v.InternalServiceError } @@ -19041,97 +21108,67 @@ func (v *HistoryService_QueryWorkflow_Result) GetInternalServiceError() (o *shar } // IsSetInternalServiceError returns true if InternalServiceError is not nil. 
-func (v *HistoryService_QueryWorkflow_Result) IsSetInternalServiceError() bool { +func (v *HistoryService_ReadDLQMessages_Result) IsSetInternalServiceError() bool { return v != nil && v.InternalServiceError != nil } -// GetEntityNotExistError returns the value of EntityNotExistError if it is set or its -// zero value if it is unset. -func (v *HistoryService_QueryWorkflow_Result) GetEntityNotExistError() (o *shared.EntityNotExistsError) { - if v != nil && v.EntityNotExistError != nil { - return v.EntityNotExistError - } - - return -} - -// IsSetEntityNotExistError returns true if EntityNotExistError is not nil. -func (v *HistoryService_QueryWorkflow_Result) IsSetEntityNotExistError() bool { - return v != nil && v.EntityNotExistError != nil -} - -// GetQueryFailedError returns the value of QueryFailedError if it is set or its -// zero value if it is unset. -func (v *HistoryService_QueryWorkflow_Result) GetQueryFailedError() (o *shared.QueryFailedError) { - if v != nil && v.QueryFailedError != nil { - return v.QueryFailedError - } - - return -} - -// IsSetQueryFailedError returns true if QueryFailedError is not nil. -func (v *HistoryService_QueryWorkflow_Result) IsSetQueryFailedError() bool { - return v != nil && v.QueryFailedError != nil -} - -// GetLimitExceededError returns the value of LimitExceededError if it is set or its +// GetServiceBusyError returns the value of ServiceBusyError if it is set or its // zero value if it is unset. -func (v *HistoryService_QueryWorkflow_Result) GetLimitExceededError() (o *shared.LimitExceededError) { - if v != nil && v.LimitExceededError != nil { - return v.LimitExceededError +func (v *HistoryService_ReadDLQMessages_Result) GetServiceBusyError() (o *shared.ServiceBusyError) { + if v != nil && v.ServiceBusyError != nil { + return v.ServiceBusyError } return } -// IsSetLimitExceededError returns true if LimitExceededError is not nil. -func (v *HistoryService_QueryWorkflow_Result) IsSetLimitExceededError() bool { - return v != nil && v.LimitExceededError != nil +// IsSetServiceBusyError returns true if ServiceBusyError is not nil. +func (v *HistoryService_ReadDLQMessages_Result) IsSetServiceBusyError() bool { + return v != nil && v.ServiceBusyError != nil } -// GetServiceBusyError returns the value of ServiceBusyError if it is set or its +// GetEntityNotExistError returns the value of EntityNotExistError if it is set or its // zero value if it is unset. -func (v *HistoryService_QueryWorkflow_Result) GetServiceBusyError() (o *shared.ServiceBusyError) { - if v != nil && v.ServiceBusyError != nil { - return v.ServiceBusyError +func (v *HistoryService_ReadDLQMessages_Result) GetEntityNotExistError() (o *shared.EntityNotExistsError) { + if v != nil && v.EntityNotExistError != nil { + return v.EntityNotExistError } return } -// IsSetServiceBusyError returns true if ServiceBusyError is not nil. -func (v *HistoryService_QueryWorkflow_Result) IsSetServiceBusyError() bool { - return v != nil && v.ServiceBusyError != nil +// IsSetEntityNotExistError returns true if EntityNotExistError is not nil. +func (v *HistoryService_ReadDLQMessages_Result) IsSetEntityNotExistError() bool { + return v != nil && v.EntityNotExistError != nil } -// GetClientVersionNotSupportedError returns the value of ClientVersionNotSupportedError if it is set or its +// GetShardOwnershipLostError returns the value of ShardOwnershipLostError if it is set or its // zero value if it is unset. 
-func (v *HistoryService_QueryWorkflow_Result) GetClientVersionNotSupportedError() (o *shared.ClientVersionNotSupportedError) { - if v != nil && v.ClientVersionNotSupportedError != nil { - return v.ClientVersionNotSupportedError +func (v *HistoryService_ReadDLQMessages_Result) GetShardOwnershipLostError() (o *ShardOwnershipLostError) { + if v != nil && v.ShardOwnershipLostError != nil { + return v.ShardOwnershipLostError } return } -// IsSetClientVersionNotSupportedError returns true if ClientVersionNotSupportedError is not nil. -func (v *HistoryService_QueryWorkflow_Result) IsSetClientVersionNotSupportedError() bool { - return v != nil && v.ClientVersionNotSupportedError != nil +// IsSetShardOwnershipLostError returns true if ShardOwnershipLostError is not nil. +func (v *HistoryService_ReadDLQMessages_Result) IsSetShardOwnershipLostError() bool { + return v != nil && v.ShardOwnershipLostError != nil } // MethodName returns the name of the Thrift function as specified in // the IDL, for which this struct represent the result. // -// This will always be "QueryWorkflow" for this struct. -func (v *HistoryService_QueryWorkflow_Result) MethodName() string { - return "QueryWorkflow" +// This will always be "ReadDLQMessages" for this struct. +func (v *HistoryService_ReadDLQMessages_Result) MethodName() string { + return "ReadDLQMessages" } // EnvelopeType returns the kind of value inside this struct. // // This will always be Reply for this struct. -func (v *HistoryService_QueryWorkflow_Result) EnvelopeType() wire.EnvelopeType { +func (v *HistoryService_ReadDLQMessages_Result) EnvelopeType() wire.EnvelopeType { return wire.Reply } diff --git a/.gen/go/history/historyserviceclient/client.go b/.gen/go/history/historyserviceclient/client.go index 67749991cf2..de58198e348 100644 --- a/.gen/go/history/historyserviceclient/client.go +++ b/.gen/go/history/historyserviceclient/client.go @@ -81,18 +81,36 @@ type Interface interface { opts ...yarpc.CallOption, ) (*replicator.GetReplicationMessagesResponse, error) + MergeDLQMessages( + ctx context.Context, + Request *replicator.MergeDLQMessagesRequest, + opts ...yarpc.CallOption, + ) (*replicator.MergeDLQMessagesResponse, error) + PollMutableState( ctx context.Context, PollRequest *history.PollMutableStateRequest, opts ...yarpc.CallOption, ) (*history.PollMutableStateResponse, error) + PurgeDLQMessages( + ctx context.Context, + Request *replicator.PurgeDLQMessagesRequest, + opts ...yarpc.CallOption, + ) error + QueryWorkflow( ctx context.Context, QueryRequest *history.QueryWorkflowRequest, opts ...yarpc.CallOption, ) (*history.QueryWorkflowResponse, error) + ReadDLQMessages( + ctx context.Context, + Request *replicator.ReadDLQMessagesRequest, + opts ...yarpc.CallOption, + ) (*replicator.ReadDLQMessagesResponse, error) + ReapplyEvents( ctx context.Context, ReapplyEventsRequest *history.ReapplyEventsRequest, @@ -435,6 +453,29 @@ func (c client) GetReplicationMessages( return } +func (c client) MergeDLQMessages( + ctx context.Context, + _Request *replicator.MergeDLQMessagesRequest, + opts ...yarpc.CallOption, +) (success *replicator.MergeDLQMessagesResponse, err error) { + + args := history.HistoryService_MergeDLQMessages_Helper.Args(_Request) + + var body wire.Value + body, err = c.c.Call(ctx, args, opts...) 
+ if err != nil { + return + } + + var result history.HistoryService_MergeDLQMessages_Result + if err = result.FromWire(body); err != nil { + return + } + + success, err = history.HistoryService_MergeDLQMessages_Helper.UnwrapResponse(&result) + return +} + func (c client) PollMutableState( ctx context.Context, _PollRequest *history.PollMutableStateRequest, @@ -458,6 +499,29 @@ func (c client) PollMutableState( return } +func (c client) PurgeDLQMessages( + ctx context.Context, + _Request *replicator.PurgeDLQMessagesRequest, + opts ...yarpc.CallOption, +) (err error) { + + args := history.HistoryService_PurgeDLQMessages_Helper.Args(_Request) + + var body wire.Value + body, err = c.c.Call(ctx, args, opts...) + if err != nil { + return + } + + var result history.HistoryService_PurgeDLQMessages_Result + if err = result.FromWire(body); err != nil { + return + } + + err = history.HistoryService_PurgeDLQMessages_Helper.UnwrapResponse(&result) + return +} + func (c client) QueryWorkflow( ctx context.Context, _QueryRequest *history.QueryWorkflowRequest, @@ -481,6 +545,29 @@ func (c client) QueryWorkflow( return } +func (c client) ReadDLQMessages( + ctx context.Context, + _Request *replicator.ReadDLQMessagesRequest, + opts ...yarpc.CallOption, +) (success *replicator.ReadDLQMessagesResponse, err error) { + + args := history.HistoryService_ReadDLQMessages_Helper.Args(_Request) + + var body wire.Value + body, err = c.c.Call(ctx, args, opts...) + if err != nil { + return + } + + var result history.HistoryService_ReadDLQMessages_Result + if err = result.FromWire(body); err != nil { + return + } + + success, err = history.HistoryService_ReadDLQMessages_Helper.UnwrapResponse(&result) + return +} + func (c client) ReapplyEvents( ctx context.Context, _ReapplyEventsRequest *history.ReapplyEventsRequest, diff --git a/.gen/go/history/historyserviceserver/server.go b/.gen/go/history/historyserviceserver/server.go index 02a7d24314a..7fba041fe7d 100644 --- a/.gen/go/history/historyserviceserver/server.go +++ b/.gen/go/history/historyserviceserver/server.go @@ -72,16 +72,31 @@ type Interface interface { Request *replicator.GetReplicationMessagesRequest, ) (*replicator.GetReplicationMessagesResponse, error) + MergeDLQMessages( + ctx context.Context, + Request *replicator.MergeDLQMessagesRequest, + ) (*replicator.MergeDLQMessagesResponse, error) + PollMutableState( ctx context.Context, PollRequest *history.PollMutableStateRequest, ) (*history.PollMutableStateResponse, error) + PurgeDLQMessages( + ctx context.Context, + Request *replicator.PurgeDLQMessagesRequest, + ) error + QueryWorkflow( ctx context.Context, QueryRequest *history.QueryWorkflowRequest, ) (*history.QueryWorkflowResponse, error) + ReadDLQMessages( + ctx context.Context, + Request *replicator.ReadDLQMessagesRequest, + ) (*replicator.ReadDLQMessagesResponse, error) + ReapplyEvents( ctx context.Context, ReapplyEventsRequest *history.ReapplyEventsRequest, @@ -301,6 +316,17 @@ func New(impl Interface, opts ...thrift.RegisterOption) []transport.Procedure { ThriftModule: history.ThriftModule, }, + thrift.Method{ + Name: "MergeDLQMessages", + HandlerSpec: thrift.HandlerSpec{ + + Type: transport.Unary, + Unary: thrift.UnaryHandler(h.MergeDLQMessages), + }, + Signature: "MergeDLQMessages(Request *replicator.MergeDLQMessagesRequest) (*replicator.MergeDLQMessagesResponse)", + ThriftModule: history.ThriftModule, + }, + thrift.Method{ Name: "PollMutableState", HandlerSpec: thrift.HandlerSpec{ @@ -312,6 +338,17 @@ func New(impl Interface, opts 
...thrift.RegisterOption) []transport.Procedure { ThriftModule: history.ThriftModule, }, + thrift.Method{ + Name: "PurgeDLQMessages", + HandlerSpec: thrift.HandlerSpec{ + + Type: transport.Unary, + Unary: thrift.UnaryHandler(h.PurgeDLQMessages), + }, + Signature: "PurgeDLQMessages(Request *replicator.PurgeDLQMessagesRequest)", + ThriftModule: history.ThriftModule, + }, + thrift.Method{ Name: "QueryWorkflow", HandlerSpec: thrift.HandlerSpec{ @@ -323,6 +360,17 @@ func New(impl Interface, opts ...thrift.RegisterOption) []transport.Procedure { ThriftModule: history.ThriftModule, }, + thrift.Method{ + Name: "ReadDLQMessages", + HandlerSpec: thrift.HandlerSpec{ + + Type: transport.Unary, + Unary: thrift.UnaryHandler(h.ReadDLQMessages), + }, + Signature: "ReadDLQMessages(Request *replicator.ReadDLQMessagesRequest) (*replicator.ReadDLQMessagesResponse)", + ThriftModule: history.ThriftModule, + }, + thrift.Method{ Name: "ReapplyEvents", HandlerSpec: thrift.HandlerSpec{ @@ -611,7 +659,7 @@ func New(impl Interface, opts ...thrift.RegisterOption) []transport.Procedure { }, } - procedures := make([]transport.Procedure, 0, 35) + procedures := make([]transport.Procedure, 0, 38) procedures = append(procedures, thrift.BuildProcedures(service, opts...)...) return procedures } @@ -751,6 +799,25 @@ func (h handler) GetReplicationMessages(ctx context.Context, body wire.Value) (t return response, err } +func (h handler) MergeDLQMessages(ctx context.Context, body wire.Value) (thrift.Response, error) { + var args history.HistoryService_MergeDLQMessages_Args + if err := args.FromWire(body); err != nil { + return thrift.Response{}, err + } + + success, err := h.impl.MergeDLQMessages(ctx, args.Request) + + hadError := err != nil + result, err := history.HistoryService_MergeDLQMessages_Helper.WrapResponse(success, err) + + var response thrift.Response + if err == nil { + response.IsApplicationError = hadError + response.Body = result + } + return response, err +} + func (h handler) PollMutableState(ctx context.Context, body wire.Value) (thrift.Response, error) { var args history.HistoryService_PollMutableState_Args if err := args.FromWire(body); err != nil { @@ -770,6 +837,25 @@ func (h handler) PollMutableState(ctx context.Context, body wire.Value) (thrift. 
return response, err } +func (h handler) PurgeDLQMessages(ctx context.Context, body wire.Value) (thrift.Response, error) { + var args history.HistoryService_PurgeDLQMessages_Args + if err := args.FromWire(body); err != nil { + return thrift.Response{}, err + } + + err := h.impl.PurgeDLQMessages(ctx, args.Request) + + hadError := err != nil + result, err := history.HistoryService_PurgeDLQMessages_Helper.WrapResponse(err) + + var response thrift.Response + if err == nil { + response.IsApplicationError = hadError + response.Body = result + } + return response, err +} + func (h handler) QueryWorkflow(ctx context.Context, body wire.Value) (thrift.Response, error) { var args history.HistoryService_QueryWorkflow_Args if err := args.FromWire(body); err != nil { @@ -789,6 +875,25 @@ func (h handler) QueryWorkflow(ctx context.Context, body wire.Value) (thrift.Res return response, err } +func (h handler) ReadDLQMessages(ctx context.Context, body wire.Value) (thrift.Response, error) { + var args history.HistoryService_ReadDLQMessages_Args + if err := args.FromWire(body); err != nil { + return thrift.Response{}, err + } + + success, err := h.impl.ReadDLQMessages(ctx, args.Request) + + hadError := err != nil + result, err := history.HistoryService_ReadDLQMessages_Helper.WrapResponse(success, err) + + var response thrift.Response + if err == nil { + response.IsApplicationError = hadError + response.Body = result + } + return response, err +} + func (h handler) ReapplyEvents(ctx context.Context, body wire.Value) (thrift.Response, error) { var args history.HistoryService_ReapplyEvents_Args if err := args.FromWire(body); err != nil { diff --git a/.gen/go/history/historyservicetest/client.go b/.gen/go/history/historyservicetest/client.go index 7fc82841de5..81061c19c4e 100644 --- a/.gen/go/history/historyservicetest/client.go +++ b/.gen/go/history/historyservicetest/client.go @@ -295,6 +295,39 @@ func (mr *_MockClientRecorder) GetReplicationMessages( return mr.mock.ctrl.RecordCall(mr.mock, "GetReplicationMessages", args...) } +// MergeDLQMessages responds to a MergeDLQMessages call based on the mock expectations. This +// call will fail if the mock does not expect this call. Use EXPECT to expect +// a call to this function. +// +// client.EXPECT().MergeDLQMessages(gomock.Any(), ...).Return(...) +// ... := client.MergeDLQMessages(...) +func (m *MockClient) MergeDLQMessages( + ctx context.Context, + _Request *replicator.MergeDLQMessagesRequest, + opts ...yarpc.CallOption, +) (success *replicator.MergeDLQMessagesResponse, err error) { + + args := []interface{}{ctx, _Request} + for _, o := range opts { + args = append(args, o) + } + i := 0 + ret := m.ctrl.Call(m, "MergeDLQMessages", args...) + success, _ = ret[i].(*replicator.MergeDLQMessagesResponse) + i++ + err, _ = ret[i].(error) + return +} + +func (mr *_MockClientRecorder) MergeDLQMessages( + ctx interface{}, + _Request interface{}, + opts ...interface{}, +) *gomock.Call { + args := append([]interface{}{ctx, _Request}, opts...) + return mr.mock.ctrl.RecordCall(mr.mock, "MergeDLQMessages", args...) +} + // PollMutableState responds to a PollMutableState call based on the mock expectations. This // call will fail if the mock does not expect this call. Use EXPECT to expect // a call to this function. @@ -328,6 +361,37 @@ func (mr *_MockClientRecorder) PollMutableState( return mr.mock.ctrl.RecordCall(mr.mock, "PollMutableState", args...) } +// PurgeDLQMessages responds to a PurgeDLQMessages call based on the mock expectations. 
This +// call will fail if the mock does not expect this call. Use EXPECT to expect +// a call to this function. +// +// client.EXPECT().PurgeDLQMessages(gomock.Any(), ...).Return(...) +// ... := client.PurgeDLQMessages(...) +func (m *MockClient) PurgeDLQMessages( + ctx context.Context, + _Request *replicator.PurgeDLQMessagesRequest, + opts ...yarpc.CallOption, +) (err error) { + + args := []interface{}{ctx, _Request} + for _, o := range opts { + args = append(args, o) + } + i := 0 + ret := m.ctrl.Call(m, "PurgeDLQMessages", args...) + err, _ = ret[i].(error) + return +} + +func (mr *_MockClientRecorder) PurgeDLQMessages( + ctx interface{}, + _Request interface{}, + opts ...interface{}, +) *gomock.Call { + args := append([]interface{}{ctx, _Request}, opts...) + return mr.mock.ctrl.RecordCall(mr.mock, "PurgeDLQMessages", args...) +} + // QueryWorkflow responds to a QueryWorkflow call based on the mock expectations. This // call will fail if the mock does not expect this call. Use EXPECT to expect // a call to this function. @@ -361,6 +425,39 @@ func (mr *_MockClientRecorder) QueryWorkflow( return mr.mock.ctrl.RecordCall(mr.mock, "QueryWorkflow", args...) } +// ReadDLQMessages responds to a ReadDLQMessages call based on the mock expectations. This +// call will fail if the mock does not expect this call. Use EXPECT to expect +// a call to this function. +// +// client.EXPECT().ReadDLQMessages(gomock.Any(), ...).Return(...) +// ... := client.ReadDLQMessages(...) +func (m *MockClient) ReadDLQMessages( + ctx context.Context, + _Request *replicator.ReadDLQMessagesRequest, + opts ...yarpc.CallOption, +) (success *replicator.ReadDLQMessagesResponse, err error) { + + args := []interface{}{ctx, _Request} + for _, o := range opts { + args = append(args, o) + } + i := 0 + ret := m.ctrl.Call(m, "ReadDLQMessages", args...) + success, _ = ret[i].(*replicator.ReadDLQMessagesResponse) + i++ + err, _ = ret[i].(error) + return +} + +func (mr *_MockClientRecorder) ReadDLQMessages( + ctx interface{}, + _Request interface{}, + opts ...interface{}, +) *gomock.Call { + args := append([]interface{}{ctx, _Request}, opts...) + return mr.mock.ctrl.RecordCall(mr.mock, "ReadDLQMessages", args...) +} + // ReapplyEvents responds to a ReapplyEvents call based on the mock expectations. This // call will fail if the mock does not expect this call. Use EXPECT to expect // a call to this function. diff --git a/client/history/client.go b/client/history/client.go index e777b685850..75154bf89d4 100644 --- a/client/history/client.go +++ b/client/history/client.go @@ -857,16 +857,54 @@ func (c *clientImpl) ReapplyEvents( return err } -func (c *clientImpl) RefreshWorkflowTasks( +func (c *clientImpl) ReadDLQMessages( ctx context.Context, - request *h.RefreshWorkflowTasksRequest, + request *replicator.ReadDLQMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.ReadDLQMessagesResponse, error) { + + client, err := c.getClientForShardID(int(request.GetShardID())) + if err != nil { + return nil, err + } + opts = common.AggregateYarpcOptions(ctx, opts...) + return client.ReadDLQMessages(ctx, request, opts...) +} + +func (c *clientImpl) PurgeDLQMessages( + ctx context.Context, + request *replicator.PurgeDLQMessagesRequest, opts ...yarpc.CallOption, ) error { - client, err := c.getClientForWorkflowID(request.GetRequest().GetExecution().GetWorkflowId()) + + client, err := c.getClientForShardID(int(request.GetShardID())) if err != nil { return err } opts = common.AggregateYarpcOptions(ctx, opts...) 
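+	// DLQ requests name an explicit shard, so the client resolves the owning history host via getClientForShardID and forwards the call with the aggregated yarpc call options.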
+ return client.PurgeDLQMessages(ctx, request, opts...) +} + +func (c *clientImpl) MergeDLQMessages( + ctx context.Context, + request *replicator.MergeDLQMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.MergeDLQMessagesResponse, error) { + + client, err := c.getClientForShardID(int(request.GetShardID())) + if err != nil { + return nil, err + } + opts = common.AggregateYarpcOptions(ctx, opts...) + return client.MergeDLQMessages(ctx, request, opts...) +} + +func (c *clientImpl) RefreshWorkflowTasks( + ctx context.Context, + request *h.RefreshWorkflowTasksRequest, + opts ...yarpc.CallOption, +) error { + client, err := c.getClientForWorkflowID(request.GetRequest().GetExecution().GetWorkflowId()) op := func(ctx context.Context, client historyserviceclient.Interface) error { ctx, cancel := c.createContext(ctx) defer cancel() diff --git a/client/history/metricClient.go b/client/history/metricClient.go index 9eadcb77cb7..0dec77dc350 100644 --- a/client/history/metricClient.go +++ b/client/history/metricClient.go @@ -595,6 +595,57 @@ func (c *metricClient) ReapplyEvents( return err } +func (c *metricClient) ReadDLQMessages( + ctx context.Context, + request *replicator.ReadDLQMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.ReadDLQMessagesResponse, error) { + + c.metricsClient.IncCounter(metrics.HistoryClientReadDLQMessagesScope, metrics.CadenceClientRequests) + sw := c.metricsClient.StartTimer(metrics.HistoryClientReadDLQMessagesScope, metrics.CadenceClientLatency) + resp, err := c.client.ReadDLQMessages(ctx, request, opts...) + sw.Stop() + + if err != nil { + c.metricsClient.IncCounter(metrics.HistoryClientReadDLQMessagesScope, metrics.CadenceClientFailures) + } + return resp, err +} + +func (c *metricClient) PurgeDLQMessages( + ctx context.Context, + request *replicator.PurgeDLQMessagesRequest, + opts ...yarpc.CallOption, +) error { + + c.metricsClient.IncCounter(metrics.HistoryClientPurgeDLQMessagesScope, metrics.CadenceClientRequests) + sw := c.metricsClient.StartTimer(metrics.HistoryClientPurgeDLQMessagesScope, metrics.CadenceClientLatency) + err := c.client.PurgeDLQMessages(ctx, request, opts...) + sw.Stop() + + if err != nil { + c.metricsClient.IncCounter(metrics.HistoryClientPurgeDLQMessagesScope, metrics.CadenceClientFailures) + } + return err +} + +func (c *metricClient) MergeDLQMessages( + ctx context.Context, + request *replicator.MergeDLQMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.MergeDLQMessagesResponse, error) { + + c.metricsClient.IncCounter(metrics.HistoryClientMergeDLQMessagesScope, metrics.CadenceClientRequests) + sw := c.metricsClient.StartTimer(metrics.HistoryClientMergeDLQMessagesScope, metrics.CadenceClientLatency) + resp, err := c.client.MergeDLQMessages(ctx, request, opts...) 
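+	// The latency timer covers only the wrapped call; a failure counter is emitted below when the underlying client returns an error.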
+ sw.Stop() + + if err != nil { + c.metricsClient.IncCounter(metrics.HistoryClientMergeDLQMessagesScope, metrics.CadenceClientFailures) + } + return resp, err +} + func (c *metricClient) RefreshWorkflowTasks( ctx context.Context, request *h.RefreshWorkflowTasksRequest, diff --git a/client/history/retryableClient.go b/client/history/retryableClient.go index 39421d48ecf..b76d257bf6e 100644 --- a/client/history/retryableClient.go +++ b/client/history/retryableClient.go @@ -525,6 +525,53 @@ func (c *retryableClient) ReapplyEvents( return backoff.Retry(op, c.policy, c.isRetryable) } +func (c *retryableClient) ReadDLQMessages( + ctx context.Context, + request *replicator.ReadDLQMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.ReadDLQMessagesResponse, error) { + + var resp *replicator.ReadDLQMessagesResponse + op := func() error { + var err error + resp, err = c.client.ReadDLQMessages(ctx, request, opts...) + return err + } + + err := backoff.Retry(op, c.policy, c.isRetryable) + return resp, err +} + +func (c *retryableClient) PurgeDLQMessages( + ctx context.Context, + request *replicator.PurgeDLQMessagesRequest, + opts ...yarpc.CallOption, +) error { + + op := func() error { + return c.client.PurgeDLQMessages(ctx, request, opts...) + } + + return backoff.Retry(op, c.policy, c.isRetryable) +} + +func (c *retryableClient) MergeDLQMessages( + ctx context.Context, + request *replicator.MergeDLQMessagesRequest, + opts ...yarpc.CallOption, +) (*replicator.MergeDLQMessagesResponse, error) { + + var resp *replicator.MergeDLQMessagesResponse + op := func() error { + var err error + resp, err = c.client.MergeDLQMessages(ctx, request, opts...) + return err + } + + err := backoff.Retry(op, c.policy, c.isRetryable) + return resp, err +} + func (c *retryableClient) RefreshWorkflowTasks( ctx context.Context, request *h.RefreshWorkflowTasksRequest, diff --git a/common/metrics/defs.go b/common/metrics/defs.go index 87326097d21..d9197e543c6 100644 --- a/common/metrics/defs.go +++ b/common/metrics/defs.go @@ -168,6 +168,10 @@ const ( PersistencePutReplicationTaskToDLQScope // PersistenceGetReplicationTasksFromDLQScope tracks PersistenceGetReplicationTasksFromDLQScope calls made by service to persistence layer PersistenceGetReplicationTasksFromDLQScope + // PersistenceDeleteReplicationTaskFromDLQScope tracks PersistenceDeleteReplicationTaskFromDLQScope calls made by service to persistence layer + PersistenceDeleteReplicationTaskFromDLQScope + // PersistenceRangeDeleteReplicationTaskFromDLQScope tracks PersistenceRangeDeleteReplicationTaskFromDLQScope calls made by service to persistence layer + PersistenceRangeDeleteReplicationTaskFromDLQScope // PersistenceGetTimerIndexTasksScope tracks GetTimerIndexTasks calls made by service to persistence layer PersistenceGetTimerIndexTasksScope // PersistenceCompleteTimerTaskScope tracks CompleteTimerTasks calls made by service to persistence layer @@ -322,6 +326,12 @@ const ( HistoryClientQueryWorkflowScope // HistoryClientReapplyEventsScope tracks RPC calls to history service HistoryClientReapplyEventsScope + // HistoryClientReadDLQMessagesScope tracks RPC calls to history service + HistoryClientReadDLQMessagesScope + // HistoryClientPurgeDLQMessagesScope tracks RPC calls to history service + HistoryClientPurgeDLQMessagesScope + // HistoryClientMergeDLQMessagesScope tracks RPC calls to history service + HistoryClientMergeDLQMessagesScope // HistoryClientRefreshWorkflowTasksScope tracks RPC calls to history service HistoryClientRefreshWorkflowTasksScope // 
MatchingClientPollForDecisionTaskScope tracks RPC calls to matching service @@ -803,6 +813,12 @@ const ( HistoryGetReplicationMessagesScope // HistoryGetDLQReplicationMessagesScope tracks GetReplicationMessages API calls received by service HistoryGetDLQReplicationMessagesScope + // HistoryReadDLQMessagesScope tracks ReadDLQMessages API calls received by service + HistoryReadDLQMessagesScope + // HistoryPurgeDLQMessagesScope tracks PurgeDLQMessages API calls received by service + HistoryPurgeDLQMessagesScope + // HistoryMergeDLQMessagesScope tracks MergeDLQMessages API calls received by service + HistoryMergeDLQMessagesScope // HistoryShardControllerScope is the scope used by shard controller HistoryShardControllerScope // HistoryReapplyEventsScope is the scope used by event reapplication @@ -1033,8 +1049,10 @@ var ScopeDefs = map[ServiceIdx]map[int]scopeDefinition{ PersistenceGetReplicationTasksScope: {operation: "GetReplicationTasks"}, PersistenceCompleteReplicationTaskScope: {operation: "CompleteReplicationTask"}, PersistenceRangeCompleteReplicationTaskScope: {operation: "RangeCompleteReplicationTask"}, - PersistencePutReplicationTaskToDLQScope: {operation: "PersistencePutReplicationTaskToDLQ"}, - PersistenceGetReplicationTasksFromDLQScope: {operation: "PersistenceGetReplicationTasksFromDLQ"}, + PersistencePutReplicationTaskToDLQScope: {operation: "PutReplicationTaskToDLQ"}, + PersistenceGetReplicationTasksFromDLQScope: {operation: "GetReplicationTasksFromDLQ"}, + PersistenceDeleteReplicationTaskFromDLQScope: {operation: "DeleteReplicationTaskFromDLQ"}, + PersistenceRangeDeleteReplicationTaskFromDLQScope: {operation: "RangeDeleteReplicationTaskFromDLQ"}, PersistenceGetTimerIndexTasksScope: {operation: "GetTimerIndexTasks"}, PersistenceCompleteTimerTaskScope: {operation: "CompleteTimerTask"}, PersistenceRangeCompleteTimerTaskScope: {operation: "RangeCompleteTimerTask"}, @@ -1123,6 +1141,9 @@ var ScopeDefs = map[ServiceIdx]map[int]scopeDefinition{ HistoryClientGetDLQReplicationTasksScope: {operation: "HistoryClientGetDLQReplicationTasksScope", tags: map[string]string{CadenceRoleTagName: HistoryRoleTagValue}}, HistoryClientQueryWorkflowScope: {operation: "HistoryClientQueryWorkflowScope", tags: map[string]string{CadenceRoleTagName: HistoryRoleTagValue}}, HistoryClientReapplyEventsScope: {operation: "HistoryClientReapplyEventsScope", tags: map[string]string{CadenceRoleTagName: HistoryRoleTagValue}}, + HistoryClientReadDLQMessagesScope: {operation: "HistoryClientReadDLQMessagesScope", tags: map[string]string{CadenceRoleTagName: HistoryRoleTagValue}}, + HistoryClientPurgeDLQMessagesScope: {operation: "HistoryClientPurgeDLQMessagesScope", tags: map[string]string{CadenceRoleTagName: HistoryRoleTagValue}}, + HistoryClientMergeDLQMessagesScope: {operation: "HistoryClientMergeDLQMessagesScope", tags: map[string]string{CadenceRoleTagName: HistoryRoleTagValue}}, HistoryClientRefreshWorkflowTasksScope: {operation: "HistoryClientRefreshWorkflowTasksScope", tags: map[string]string{CadenceRoleTagName: HistoryRoleTagValue}}, MatchingClientPollForDecisionTaskScope: {operation: "MatchingClientPollForDecisionTask", tags: map[string]string{CadenceRoleTagName: MatchingRoleTagValue}}, MatchingClientPollForActivityTaskScope: {operation: "MatchingClientPollForActivityTask", tags: map[string]string{CadenceRoleTagName: MatchingRoleTagValue}}, @@ -1356,6 +1377,9 @@ var ScopeDefs = map[ServiceIdx]map[int]scopeDefinition{ HistoryDescribeMutableStateScope: {operation: "DescribeMutableState"}, 
HistoryGetReplicationMessagesScope: {operation: "GetReplicationMessages"}, HistoryGetDLQReplicationMessagesScope: {operation: "GetDLQReplicationMessages"}, + HistoryReadDLQMessagesScope: {operation: "ReadDLQMessages"}, + HistoryPurgeDLQMessagesScope: {operation: "PurgeDLQMessages"}, + HistoryMergeDLQMessagesScope: {operation: "MergeDLQMessages"}, HistoryShardControllerScope: {operation: "ShardController"}, HistoryReapplyEventsScope: {operation: "EventReapplication"}, HistoryRefreshWorkflowTasksScope: {operation: "RefreshWorkflowTasks"}, diff --git a/common/mocks/ExecutionManager.go b/common/mocks/ExecutionManager.go index 65f8f20bbff..87a09fe92b3 100644 --- a/common/mocks/ExecutionManager.go +++ b/common/mocks/ExecutionManager.go @@ -357,6 +357,40 @@ func (_m *ExecutionManager) GetReplicationTasksFromDLQ(request *persistence.GetR return r0, r1 } +// DeleteReplicationTaskFromDLQ provides a mock function with given fields: request +func (_m *ExecutionManager) DeleteReplicationTaskFromDLQ( + request *persistence.DeleteReplicationTaskFromDLQRequest, +) error { + + ret := _m.Called(request) + + var r0 error + if rf, ok := ret.Get(0).(func(*persistence.DeleteReplicationTaskFromDLQRequest) error); ok { + r0 = rf(request) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// RangeDeleteReplicationTaskFromDLQ provides a mock function with given fields: request +func (_m *ExecutionManager) RangeDeleteReplicationTaskFromDLQ( + request *persistence.RangeDeleteReplicationTaskFromDLQRequest, +) error { + + ret := _m.Called(request) + + var r0 error + if rf, ok := ret.Get(0).(func(*persistence.RangeDeleteReplicationTaskFromDLQRequest) error); ok { + r0 = rf(request) + } else { + r0 = ret.Error(0) + } + + return r0 +} + // GetTimerIndexTasks provides a mock function with given fields: request func (_m *ExecutionManager) GetTimerIndexTasks(request *persistence.GetTimerIndexTasksRequest) (*persistence.GetTimerIndexTasksResponse, error) { ret := _m.Called(request) diff --git a/common/persistence/cassandra/cassandraPersistence.go b/common/persistence/cassandra/cassandraPersistence.go index 59ef16650e2..7c6235ac6b8 100644 --- a/common/persistence/cassandra/cassandraPersistence.go +++ b/common/persistence/cassandra/cassandraPersistence.go @@ -742,7 +742,7 @@ workflow_state = ? ` + `and task_id > ? ` + `and task_id <= ?` - templateRangeCompleteReplicationTaskQuery = `DELETE FROM executions ` + + templateCompleteReplicationTaskBeforeQuery = `DELETE FROM executions ` + `WHERE shard_id = ? ` + `and type = ? ` + `and domain_id = ? ` + @@ -751,6 +751,10 @@ workflow_state = ? ` + `and visibility_ts = ? ` + `and task_id <= ?` + templateCompleteReplicationTaskQuery = templateCompleteTransferTaskQuery + + templateRangeCompleteReplicationTaskQuery = templateRangeCompleteTransferTaskQuery + templateGetTimerTasksQuery = `SELECT timer ` + `FROM executions ` + `WHERE shard_id = ? 
` + @@ -2224,7 +2228,7 @@ func (d *cassandraPersistence) RangeCompleteTransferTask(request *p.RangeComplet } func (d *cassandraPersistence) CompleteReplicationTask(request *p.CompleteReplicationTaskRequest) error { - query := d.session.Query(templateCompleteTransferTaskQuery, + query := d.session.Query(templateCompleteReplicationTaskQuery, d.shardID, rowTypeReplicationTask, rowTypeReplicationDomainID, @@ -2252,7 +2256,7 @@ func (d *cassandraPersistence) RangeCompleteReplicationTask( request *p.RangeCompleteReplicationTaskRequest, ) error { - query := d.session.Query(templateRangeCompleteReplicationTaskQuery, + query := d.session.Query(templateCompleteReplicationTaskBeforeQuery, d.shardID, rowTypeReplicationTask, rowTypeReplicationDomainID, @@ -2776,6 +2780,8 @@ func (d *cassandraPersistence) GetTimerIndexTasks(request *p.GetTimerIndexTasksR func (d *cassandraPersistence) PutReplicationTaskToDLQ(request *p.PutReplicationTaskToDLQRequest) error { task := request.TaskInfo + + // Use source cluster name as the workflow id for replication dlq query := d.session.Query(templateCreateReplicationTaskQuery, d.shardID, rowTypeDLQ, @@ -2832,3 +2838,60 @@ func (d *cassandraPersistence) GetReplicationTasksFromDLQ( return d.populateGetReplicationTasksResponse(query) } + +func (d *cassandraPersistence) DeleteReplicationTaskFromDLQ( + request *p.DeleteReplicationTaskFromDLQRequest, +) error { + + query := d.session.Query(templateCompleteReplicationTaskQuery, + d.shardID, + rowTypeDLQ, + rowTypeDLQDomainID, + request.SourceClusterName, + rowTypeDLQRunID, + defaultVisibilityTimestamp, + request.TaskID, + ) + + err := query.Exec() + if err != nil { + if isThrottlingError(err) { + return &workflow.ServiceBusyError{ + Message: fmt.Sprintf("DeleteReplicationTaskFromDLQ operation failed. Error: %v", err), + } + } + return &workflow.InternalServiceError{ + Message: fmt.Sprintf("DeleteReplicationTaskFromDLQ operation failed. Error: %v", err), + } + } + return nil +} + +func (d *cassandraPersistence) RangeDeleteReplicationTaskFromDLQ( + request *p.RangeDeleteReplicationTaskFromDLQRequest, +) error { + + query := d.session.Query(templateRangeCompleteReplicationTaskQuery, + d.shardID, + rowTypeDLQ, + rowTypeDLQDomainID, + request.SourceClusterName, + rowTypeDLQRunID, + defaultVisibilityTimestamp, + request.ExclusiveBeginTaskID, + request.InclusiveEndTaskID, + ) + + err := query.Exec() + if err != nil { + if isThrottlingError(err) { + return &workflow.ServiceBusyError{ + Message: fmt.Sprintf("RangeDeleteReplicationTaskFromDLQ operation failed. Error: %v", err), + } + } + return &workflow.InternalServiceError{ + Message: fmt.Sprintf("RangeDeleteReplicationTaskFromDLQ operation failed. 
Error: %v", err), + } + } + return nil +} diff --git a/common/persistence/dataInterfaces.go b/common/persistence/dataInterfaces.go index a2a71b233eb..a683141d355 100644 --- a/common/persistence/dataInterfaces.go +++ b/common/persistence/dataInterfaces.go @@ -238,6 +238,7 @@ type ( StolenSinceRenew int UpdatedAt time.Time ReplicationAckLevel int64 + ReplicationDLQAckLevel map[string]int64 TransferAckLevel int64 TimerAckLevel time.Time ClusterTransferAckLevel map[string]int64 @@ -991,6 +992,19 @@ type ( GetReplicationTasksRequest } + // DeleteReplicationTaskFromDLQRequest is used to delete replication task from DLQ + DeleteReplicationTaskFromDLQRequest struct { + SourceClusterName string + TaskID int64 + } + + //RangeDeleteReplicationTaskFromDLQRequest is used to delete replication tasks from DLQ + RangeDeleteReplicationTaskFromDLQRequest struct { + SourceClusterName string + ExclusiveBeginTaskID int64 + InclusiveEndTaskID int64 + } + // GetReplicationTasksFromDLQResponse is the response for GetReplicationTasksFromDLQ GetReplicationTasksFromDLQResponse = GetReplicationTasksResponse @@ -1469,6 +1483,8 @@ type ( RangeCompleteReplicationTask(request *RangeCompleteReplicationTaskRequest) error PutReplicationTaskToDLQ(request *PutReplicationTaskToDLQRequest) error GetReplicationTasksFromDLQ(request *GetReplicationTasksFromDLQRequest) (*GetReplicationTasksFromDLQResponse, error) + DeleteReplicationTaskFromDLQ(request *DeleteReplicationTaskFromDLQRequest) error + RangeDeleteReplicationTaskFromDLQ(request *RangeDeleteReplicationTaskFromDLQRequest) error // Timer related methods. GetTimerIndexTasks(request *GetTimerIndexTasksRequest) (*GetTimerIndexTasksResponse, error) diff --git a/common/persistence/executionStore.go b/common/persistence/executionStore.go index e556dc00534..2060dc7d240 100644 --- a/common/persistence/executionStore.go +++ b/common/persistence/executionStore.go @@ -834,6 +834,18 @@ func (m *executionManagerImpl) GetReplicationTasksFromDLQ( return m.persistence.GetReplicationTasksFromDLQ(request) } +func (m *executionManagerImpl) DeleteReplicationTaskFromDLQ( + request *DeleteReplicationTaskFromDLQRequest, +) error { + return m.persistence.DeleteReplicationTaskFromDLQ(request) +} + +func (m *executionManagerImpl) RangeDeleteReplicationTaskFromDLQ( + request *RangeDeleteReplicationTaskFromDLQRequest, +) error { + return m.persistence.RangeDeleteReplicationTaskFromDLQ(request) +} + // Timer related methods. 
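
// A minimal usage sketch (editor's illustration, not part of this change): the two deletion
// methods above are intended to pair with GetReplicationTasksFromDLQ when a replication DLQ is
// drained page by page. The helper name, the batch size, and the decision to delete the whole
// page are assumptions; the request shapes match the definitions in dataInterfaces.go.
//
//	func drainReplicationDLQPage(execMgr ExecutionManager, sourceCluster string, readLevel, maxReadLevel int64) error {
//		resp, err := execMgr.GetReplicationTasksFromDLQ(&GetReplicationTasksFromDLQRequest{
//			SourceClusterName: sourceCluster,
//			GetReplicationTasksRequest: GetReplicationTasksRequest{
//				ReadLevel:    readLevel,    // exclusive lower bound (task_id > readLevel)
//				MaxReadLevel: maxReadLevel, // inclusive upper bound (task_id <= maxReadLevel)
//				BatchSize:    100,
//			},
//		})
//		if err != nil {
//			return err
//		}
//		_ = resp.Tasks // inspect or re-apply the page here before removing it
//		return execMgr.RangeDeleteReplicationTaskFromDLQ(&RangeDeleteReplicationTaskFromDLQRequest{
//			SourceClusterName:    sourceCluster,
//			ExclusiveBeginTaskID: readLevel,
//			InclusiveEndTaskID:   maxReadLevel,
//		})
//	}
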
func (m *executionManagerImpl) GetTimerIndexTasks( request *GetTimerIndexTasksRequest, diff --git a/common/persistence/persistence-tests/executionManagerTest.go b/common/persistence/persistence-tests/executionManagerTest.go index 5d3bd6a296b..19aa729f355 100644 --- a/common/persistence/persistence-tests/executionManagerTest.go +++ b/common/persistence/persistence-tests/executionManagerTest.go @@ -5231,6 +5231,55 @@ func (s *ExecutionManagerSuite) TestCreateGetUpdateGetShard() { s.Equal(shardInfo, resp.ShardInfo) } +// TestReplicationDLQ test +func (s *ExecutionManagerSuite) TestReplicationDLQ() { + sourceCluster := "test" + taskInfo := &p.ReplicationTaskInfo{ + DomainID: uuid.New(), + WorkflowID: uuid.New(), + RunID: uuid.New(), + TaskID: 0, + TaskType: 0, + } + err := s.PutReplicationTaskToDLQ(sourceCluster, taskInfo) + s.NoError(err) + resp, err := s.GetReplicationTasksFromDLQ(sourceCluster, -1, 0, 1, nil) + s.NoError(err) + s.Len(resp.Tasks, 1) + err = s.DeleteReplicationTaskFromDLQ(sourceCluster, 0) + s.NoError(err) + resp, err = s.GetReplicationTasksFromDLQ(sourceCluster, -1, 0, 1, nil) + s.NoError(err) + s.Len(resp.Tasks, 0) + + taskInfo1 := &p.ReplicationTaskInfo{ + DomainID: uuid.New(), + WorkflowID: uuid.New(), + RunID: uuid.New(), + TaskID: 1, + TaskType: 0, + } + taskInfo2 := &p.ReplicationTaskInfo{ + DomainID: uuid.New(), + WorkflowID: uuid.New(), + RunID: uuid.New(), + TaskID: 2, + TaskType: 0, + } + err = s.PutReplicationTaskToDLQ(sourceCluster, taskInfo1) + s.NoError(err) + err = s.PutReplicationTaskToDLQ(sourceCluster, taskInfo2) + s.NoError(err) + resp, err = s.GetReplicationTasksFromDLQ(sourceCluster, 0, 2, 2, nil) + s.NoError(err) + s.Len(resp.Tasks, 2) + err = s.RangeDeleteReplicationTaskFromDLQ(sourceCluster, 0, 2) + s.NoError(err) + resp, err = s.GetReplicationTasksFromDLQ(sourceCluster, 0, 2, 2, nil) + s.NoError(err) + s.Len(resp.Tasks, 0) +} + func copyWorkflowExecutionInfo(sourceInfo *p.WorkflowExecutionInfo) *p.WorkflowExecutionInfo { return &p.WorkflowExecutionInfo{ DomainID: sourceInfo.DomainID, diff --git a/common/persistence/persistence-tests/persistenceTestBase.go b/common/persistence/persistence-tests/persistenceTestBase.go index 3e76f61a302..837a7b6ff64 100644 --- a/common/persistence/persistence-tests/persistenceTestBase.go +++ b/common/persistence/persistence-tests/persistenceTestBase.go @@ -1091,6 +1091,64 @@ func (s *TestBase) RangeCompleteReplicationTask(inclusiveEndTaskID int64) error }) } +// PutReplicationTaskToDLQ is a utility method to insert a replication task info +func (s *TestBase) PutReplicationTaskToDLQ( + sourceCluster string, + taskInfo *p.ReplicationTaskInfo, +) error { + + return s.ExecutionManager.PutReplicationTaskToDLQ(&p.PutReplicationTaskToDLQRequest{ + SourceClusterName: sourceCluster, + TaskInfo: taskInfo, + }) +} + +// GetReplicationTasksFromDLQ is a utility method to read replication task info +func (s *TestBase) GetReplicationTasksFromDLQ( + sourceCluster string, + readLevel int64, + maxReadLevel int64, + pageSize int, + pageToken []byte, +) (*p.GetReplicationTasksFromDLQResponse, error) { + + return s.ExecutionManager.GetReplicationTasksFromDLQ(&p.GetReplicationTasksFromDLQRequest{ + SourceClusterName: sourceCluster, + GetReplicationTasksRequest: p.GetReplicationTasksRequest{ + ReadLevel: readLevel, + MaxReadLevel: maxReadLevel, + BatchSize: pageSize, + NextPageToken: pageToken, + }, + }) +} + +// DeleteReplicationTaskFromDLQ is a utility method to delete a replication task info +func (s *TestBase) 
DeleteReplicationTaskFromDLQ(
+	sourceCluster string,
+	taskID int64,
+) error {
+
+	return s.ExecutionManager.DeleteReplicationTaskFromDLQ(&p.DeleteReplicationTaskFromDLQRequest{
+		SourceClusterName: sourceCluster,
+		TaskID:            taskID,
+	})
+}
+
+// RangeDeleteReplicationTaskFromDLQ is a utility method to delete replication task info
+func (s *TestBase) RangeDeleteReplicationTaskFromDLQ(
+	sourceCluster string,
+	beginTaskID int64,
+	endTaskID int64,
+) error {
+
+	return s.ExecutionManager.RangeDeleteReplicationTaskFromDLQ(&p.RangeDeleteReplicationTaskFromDLQRequest{
+		SourceClusterName:    sourceCluster,
+		ExclusiveBeginTaskID: beginTaskID,
+		InclusiveEndTaskID:   endTaskID,
+	})
+}
+
 // CompleteTransferTask is a utility method to complete a transfer task
 func (s *TestBase) CompleteTransferTask(taskID int64) error {
diff --git a/common/persistence/persistenceInterface.go b/common/persistence/persistenceInterface.go
index be186918fa4..515edd5e1ad 100644
--- a/common/persistence/persistenceInterface.go
+++ b/common/persistence/persistenceInterface.go
@@ -81,6 +81,8 @@ type (
 		RangeCompleteReplicationTask(request *RangeCompleteReplicationTaskRequest) error
 		PutReplicationTaskToDLQ(request *PutReplicationTaskToDLQRequest) error
 		GetReplicationTasksFromDLQ(request *GetReplicationTasksFromDLQRequest) (*GetReplicationTasksFromDLQResponse, error)
+		DeleteReplicationTaskFromDLQ(request *DeleteReplicationTaskFromDLQRequest) error
+		RangeDeleteReplicationTaskFromDLQ(request *RangeDeleteReplicationTaskFromDLQRequest) error

 		// Timer related methods.
 		GetTimerIndexTasks(request *GetTimerIndexTasksRequest) (*GetTimerIndexTasksResponse, error)
diff --git a/common/persistence/persistenceMetricClients.go b/common/persistence/persistenceMetricClients.go
index 9033437b187..b2f68c6fa4a 100644
--- a/common/persistence/persistenceMetricClients.go
+++ b/common/persistence/persistenceMetricClients.go
@@ -446,6 +446,38 @@ func (p *workflowExecutionPersistenceClient) GetReplicationTasksFromDLQ(
 	return response, err
 }
+
+func (p *workflowExecutionPersistenceClient) DeleteReplicationTaskFromDLQ(
+	request *DeleteReplicationTaskFromDLQRequest,
+) error {
+	p.metricClient.IncCounter(metrics.PersistenceDeleteReplicationTaskFromDLQScope, metrics.PersistenceRequests)
+
+	sw := p.metricClient.StartTimer(metrics.PersistenceDeleteReplicationTaskFromDLQScope, metrics.PersistenceLatency)
+	err := p.persistence.DeleteReplicationTaskFromDLQ(request)
+	sw.Stop()
+
+	if err != nil {
+		p.updateErrorMetric(metrics.PersistenceDeleteReplicationTaskFromDLQScope, err)
+	}
+
+	return err
+}
+
+func (p *workflowExecutionPersistenceClient) RangeDeleteReplicationTaskFromDLQ(
+	request *RangeDeleteReplicationTaskFromDLQRequest,
+) error {
+	p.metricClient.IncCounter(metrics.PersistenceRangeDeleteReplicationTaskFromDLQScope, metrics.PersistenceRequests)
+
+	sw := p.metricClient.StartTimer(metrics.PersistenceRangeDeleteReplicationTaskFromDLQScope, metrics.PersistenceLatency)
+	err := p.persistence.RangeDeleteReplicationTaskFromDLQ(request)
+	sw.Stop()
+
+	if err != nil {
+		p.updateErrorMetric(metrics.PersistenceRangeDeleteReplicationTaskFromDLQScope, err)
+	}
+
+	return err
+}
+
 func (p *workflowExecutionPersistenceClient) GetTimerIndexTasks(request *GetTimerIndexTasksRequest) (*GetTimerIndexTasksResponse, error) {
 	p.metricClient.IncCounter(metrics.PersistenceGetTimerIndexTasksScope, metrics.PersistenceRequests)
diff --git a/common/persistence/persistenceRateLimitedClients.go b/common/persistence/persistenceRateLimitedClients.go
index
9cffd231cde..9bbfe634c95 100644 --- a/common/persistence/persistenceRateLimitedClients.go +++ b/common/persistence/persistenceRateLimitedClients.go @@ -337,6 +337,26 @@ func (p *workflowExecutionRateLimitedPersistenceClient) GetReplicationTasksFromD return p.persistence.GetReplicationTasksFromDLQ(request) } +func (p *workflowExecutionRateLimitedPersistenceClient) DeleteReplicationTaskFromDLQ( + request *DeleteReplicationTaskFromDLQRequest, +) error { + if ok := p.rateLimiter.Allow(); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.DeleteReplicationTaskFromDLQ(request) +} + +func (p *workflowExecutionRateLimitedPersistenceClient) RangeDeleteReplicationTaskFromDLQ( + request *RangeDeleteReplicationTaskFromDLQRequest, +) error { + if ok := p.rateLimiter.Allow(); !ok { + return ErrPersistenceLimitExceeded + } + + return p.persistence.RangeDeleteReplicationTaskFromDLQ(request) +} + func (p *workflowExecutionRateLimitedPersistenceClient) GetTimerIndexTasks(request *GetTimerIndexTasksRequest) (*GetTimerIndexTasksResponse, error) { if ok := p.rateLimiter.Allow(); !ok { return nil, ErrPersistenceLimitExceeded diff --git a/common/persistence/sql/sqlExecutionManager.go b/common/persistence/sql/sqlExecutionManager.go index 05eb2341eec..2d4bb67c86e 100644 --- a/common/persistence/sql/sqlExecutionManager.go +++ b/common/persistence/sql/sqlExecutionManager.go @@ -1058,6 +1058,43 @@ func (m *sqlExecutionManager) GetReplicationTasksFromDLQ( } } +func (m *sqlExecutionManager) DeleteReplicationTaskFromDLQ( + request *p.DeleteReplicationTaskFromDLQRequest, +) error { + + filter := sqlplugin.ReplicationTasksFilter{ + ShardID: m.shardID, + TaskID: request.TaskID, + } + + if _, err := m.db.DeleteMessageFromReplicationTasksDLQ(&sqlplugin.ReplicationTasksDLQFilter{ + ReplicationTasksFilter: filter, + SourceClusterName: request.SourceClusterName, + }); err != nil { + return err + } + return nil +} + +func (m *sqlExecutionManager) RangeDeleteReplicationTaskFromDLQ( + request *p.RangeDeleteReplicationTaskFromDLQRequest, +) error { + + filter := sqlplugin.ReplicationTasksFilter{ + ShardID: m.shardID, + TaskID: request.ExclusiveBeginTaskID, + InclusiveEndTaskID: request.InclusiveEndTaskID, + } + + if _, err := m.db.RangeDeleteMessageFromReplicationTasksDLQ(&sqlplugin.ReplicationTasksDLQFilter{ + ReplicationTasksFilter: filter, + SourceClusterName: request.SourceClusterName, + }); err != nil { + return err + } + return nil +} + type timerTaskPageToken struct { TaskID int64 Timestamp time.Time diff --git a/common/persistence/sql/sqlplugin/interfaces.go b/common/persistence/sql/sqlplugin/interfaces.go index 95d80bf8f09..80d38b043a4 100644 --- a/common/persistence/sql/sqlplugin/interfaces.go +++ b/common/persistence/sql/sqlplugin/interfaces.go @@ -607,6 +607,12 @@ type ( // SelectFromReplicationTasksDLQ returns one or more rows from replication_tasks_dlq table // Required filter params - {sourceClusterName, shardID, minTaskID, pageSize} SelectFromReplicationTasksDLQ(filter *ReplicationTasksDLQFilter) ([]ReplicationTasksRow, error) + // DeleteMessageFromReplicationTasksDLQ deletes one row from replication_tasks_dlq table + // Required filter params - {sourceClusterName, shardID, taskID} + DeleteMessageFromReplicationTasksDLQ(filter *ReplicationTasksDLQFilter) (sql.Result, error) + // RangeDeleteMessageFromReplicationTasksDLQ deletes one or more rows from replication_tasks_dlq table + // Required filter params - {sourceClusterName, shardID, taskID, inclusiveTaskID} + 
RangeDeleteMessageFromReplicationTasksDLQ(filter *ReplicationTasksDLQFilter) (sql.Result, error) ReplaceIntoActivityInfoMaps(rows []ActivityInfoMapsRow) (sql.Result, error) // SelectFromActivityInfoMaps returns one or more rows from activity_info_maps diff --git a/common/persistence/sql/sqlplugin/mysql/execution.go b/common/persistence/sql/sqlplugin/mysql/execution.go index a0051f06ec9..d49330b0374 100644 --- a/common/persistence/sql/sqlplugin/mysql/execution.go +++ b/common/persistence/sql/sqlplugin/mysql/execution.go @@ -140,6 +140,18 @@ VALUES (:source_cluster_name, :data, :data_encoding) ` + deleteReplicationTaskFromDLQQuery = ` + DELETE FROM replication_tasks_dlq + WHERE source_cluster_name = ? + AND shard_id = ? + AND task_id = ?` + + rangeDeleteReplicationTaskFromDLQQuery = ` + DELETE FROM replication_tasks_dlq + WHERE source_cluster_name = ? + AND shard_id = ? + AND task_id > ? + AND task_id <= ?` ) // InsertIntoExecutions inserts a row into executions table @@ -338,3 +350,30 @@ func (mdb *db) SelectFromReplicationTasksDLQ(filter *sqlplugin.ReplicationTasksD filter.PageSize) return rows, err } + +// DeleteMessageFromReplicationTasksDLQ deletes one row from replication_tasks_dlq table +func (mdb *db) DeleteMessageFromReplicationTasksDLQ( + filter *sqlplugin.ReplicationTasksDLQFilter, +) (sql.Result, error) { + + return mdb.conn.Exec( + deleteReplicationTaskFromDLQQuery, + filter.SourceClusterName, + filter.ShardID, + filter.TaskID, + ) +} + +// DeleteMessageFromReplicationTasksDLQ deletes one or more rows from replication_tasks_dlq table +func (mdb *db) RangeDeleteMessageFromReplicationTasksDLQ( + filter *sqlplugin.ReplicationTasksDLQFilter, +) (sql.Result, error) { + + return mdb.conn.Exec( + rangeDeleteReplicationTaskFromDLQQuery, + filter.SourceClusterName, + filter.ShardID, + filter.TaskID, + filter.InclusiveEndTaskID, + ) +} diff --git a/common/persistence/sql/sqlplugin/postgres/execution.go b/common/persistence/sql/sqlplugin/postgres/execution.go index 19440006127..e9324c3377a 100644 --- a/common/persistence/sql/sqlplugin/postgres/execution.go +++ b/common/persistence/sql/sqlplugin/postgres/execution.go @@ -139,6 +139,18 @@ VALUES (:source_cluster_name, :data, :data_encoding) ` + deleteReplicationTaskFromDLQQuery = ` + DELETE FROM replication_tasks_dlq + WHERE source_cluster_name = $1 + AND shard_id = $2 + AND task_id = $3` + + rangeDeleteReplicationTaskFromDLQQuery = ` + DELETE FROM replication_tasks_dlq + WHERE source_cluster_name = $1 + AND shard_id = $2 + AND task_id > $3 + AND task_id <= $4` ) // InsertIntoExecutions inserts a row into executions table @@ -337,3 +349,30 @@ func (pdb *db) SelectFromReplicationTasksDLQ(filter *sqlplugin.ReplicationTasksD filter.PageSize) return rows, err } + +// DeleteMessageFromReplicationTasksDLQ deletes one row from replication_tasks_dlq table +func (pdb *db) DeleteMessageFromReplicationTasksDLQ( + filter *sqlplugin.ReplicationTasksDLQFilter, +) (sql.Result, error) { + + return pdb.conn.Exec( + deleteReplicationTaskFromDLQQuery, + filter.SourceClusterName, + filter.ShardID, + filter.TaskID, + ) +} + +// DeleteMessageFromReplicationTasksDLQ deletes one or more rows from replication_tasks_dlq table +func (pdb *db) RangeDeleteMessageFromReplicationTasksDLQ( + filter *sqlplugin.ReplicationTasksDLQFilter, +) (sql.Result, error) { + + return pdb.conn.Exec( + rangeDeleteReplicationTaskFromDLQQuery, + filter.SourceClusterName, + filter.ShardID, + filter.TaskID, + filter.InclusiveEndTaskID, + ) +} diff --git a/idls b/idls index 
d3ddad0b5d9..e6dae5166cf 160000
--- a/idls
+++ b/idls
@@ -1 +1 @@
-Subproject commit d3ddad0b5d91e651fbf57684c5c68ad4b9ff159f
+Subproject commit e6dae5166cf22d4fe9dd401add2fb12bf1f112a9
diff --git a/schema/cassandra/cadence/schema.cql b/schema/cassandra/cadence/schema.cql
index d4e6e74c8b3..9b9a2ea694f 100644
--- a/schema/cassandra/cadence/schema.cql
+++ b/schema/cassandra/cadence/schema.cql
@@ -17,6 +17,8 @@ CREATE TYPE shard (
     domain_notification_version bigint, -- the global domain change version this shard is aware of
     -- Mapping of (remote) cluster to corresponding replication level (last replicated task_id)
     cluster_replication_level map<text, bigint>,
+    -- Mapping of (remote) cluster to corresponding replication DLQ ack level (last replicated task_id)
+    replication_dlq_ack_level map<text, bigint>,
 );
 --- Workflow execution and mutable state ---
diff --git a/schema/cassandra/cadence/versioned/v0.25/manifest.json b/schema/cassandra/cadence/versioned/v0.25/manifest.json
new file mode 100644
index 00000000000..8eefbcefb8e
--- /dev/null
+++ b/schema/cassandra/cadence/versioned/v0.25/manifest.json
@@ -0,0 +1,8 @@
+{
+    "CurrVersion": "0.25",
+    "MinCompatibleVersion": "0.25",
+    "Description": "Added replication dlq ack level mapping to shard type",
+    "SchemaUpdateCqlFiles": [
+        "replication_dlq.cql"
+    ]
+}
\ No newline at end of file
diff --git a/schema/cassandra/cadence/versioned/v0.25/replication_dlq.cql b/schema/cassandra/cadence/versioned/v0.25/replication_dlq.cql
new file mode 100644
index 00000000000..727f424e4fc
--- /dev/null
+++ b/schema/cassandra/cadence/versioned/v0.25/replication_dlq.cql
@@ -0,0 +1 @@
+ALTER TYPE shard ADD replication_dlq_ack_level map<text, bigint>;
\ No newline at end of file
diff --git a/schema/cassandra/version.go b/schema/cassandra/version.go
index 8a0766d6009..1946b0254ab 100644
--- a/schema/cassandra/version.go
+++ b/schema/cassandra/version.go
@@ -23,7 +23,7 @@ package cassandra
 // NOTE: whenever there is a new data base schema update, plz update the following versions
 // Version is the Cassandra database release version
-const Version = "0.24"
+const Version = "0.25"
 // VisibilityVersion is the Cassandra visibility database release version
 const VisibilityVersion = "0.4"
diff --git a/service/frontend/adminHandler.go b/service/frontend/adminHandler.go
index 05f71f55d2e..0214614a2b7 100644
--- a/service/frontend/adminHandler.go
+++ b/service/frontend/adminHandler.go
@@ -823,7 +823,7 @@ func (adh *AdminHandler) ReadDLQMessages(
 	var op func() error
 	switch request.GetType() {
 	case replicator.DLQTypeReplication:
-		return nil, &gen.BadRequestError{Message: "Not implement."}
+		return adh.GetHistoryClient().ReadDLQMessages(ctx, request)
 	case replicator.DLQTypeDomain:
 		op = func() error {
 			select {
@@ -877,7 +877,7 @@ func (adh *AdminHandler) PurgeDLQMessages(
 	var op func() error
 	switch request.GetType() {
 	case replicator.DLQTypeReplication:
-		return &gen.BadRequestError{Message: "Not implement."}
+		return adh.GetHistoryClient().PurgeDLQMessages(ctx, request)
 	case replicator.DLQTypeDomain:
 		op = func() error {
 			select {
@@ -926,7 +926,7 @@ func (adh *AdminHandler) MergeDLQMessages(
 	var op func() error
 	switch request.GetType() {
 	case replicator.DLQTypeReplication:
-		return nil, &gen.BadRequestError{Message: "Not implement."}
+		return adh.GetHistoryClient().MergeDLQMessages(ctx, request)
 	case replicator.DLQTypeDomain:
 		op = func() error {
diff --git a/service/history/handler.go b/service/history/handler.go
index 0ddf07a3c37..b516227db64 100644
--- a/service/history/handler.go
+++ b/service/history/handler.go
@@ 
-1608,6 +1608,72 @@ func (h *Handler) ReapplyEvents( return nil } +// ReadDLQMessages reads replication DLQ messages +func (h *Handler) ReadDLQMessages( + ctx context.Context, + request *r.ReadDLQMessagesRequest, +) (resp *r.ReadDLQMessagesResponse, retError error) { + + defer log.CapturePanic(h.GetLogger(), &retError) + h.startWG.Wait() + + scope := metrics.HistoryReadDLQMessagesScope + h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests) + sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency) + defer sw.Stop() + + engine, err := h.controller.getEngineForShard(int(request.GetShardID())) + if err != nil { + return nil, h.error(err, scope, "", "") + } + + return engine.ReadDLQMessages(ctx, request) +} + +// PurgeDLQMessages deletes replication DLQ messages +func (h *Handler) PurgeDLQMessages( + ctx context.Context, + request *r.PurgeDLQMessagesRequest, +) (retError error) { + + defer log.CapturePanic(h.GetLogger(), &retError) + h.startWG.Wait() + + scope := metrics.HistoryPurgeDLQMessagesScope + h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests) + sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency) + defer sw.Stop() + + engine, err := h.controller.getEngineForShard(int(request.GetShardID())) + if err != nil { + return h.error(err, scope, "", "") + } + + return engine.PurgeDLQMessages(ctx, request) +} + +// MergeDLQMessages reads and applies replication DLQ messages +func (h *Handler) MergeDLQMessages( + ctx context.Context, + request *r.MergeDLQMessagesRequest, +) (resp *r.MergeDLQMessagesResponse, retError error) { + + defer log.CapturePanic(h.GetLogger(), &retError) + h.startWG.Wait() + + scope := metrics.HistoryMergeDLQMessagesScope + h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests) + sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency) + defer sw.Stop() + + engine, err := h.controller.getEngineForShard(int(request.GetShardID())) + if err != nil { + return nil, h.error(err, scope, "", "") + } + + return engine.MergeDLQMessages(ctx, request) +} + // RefreshWorkflowTasks refreshes all the tasks of a workflow func (h *Handler) RefreshWorkflowTasks( ctx context.Context, @@ -1617,7 +1683,6 @@ func (h *Handler) RefreshWorkflowTasks( h.GetMetricsClient().IncCounter(scope, metrics.CadenceRequests) sw := h.GetMetricsClient().StartTimer(scope, metrics.CadenceLatency) defer sw.Stop() - domainID := request.GetDomainUIID() execution := request.GetRequest().GetExecution() workflowID := execution.GetWorkflowId() diff --git a/service/history/historyEngine.go b/service/history/historyEngine.go index a4e49b9575f..eb1cc58a046 100644 --- a/service/history/historyEngine.go +++ b/service/history/historyEngine.go @@ -54,6 +54,7 @@ import ( "github.com/uber/cadence/common/metrics" "github.com/uber/cadence/common/persistence" "github.com/uber/cadence/common/service/config" + "github.com/uber/cadence/common/xdc" warchiver "github.com/uber/cadence/service/worker/archiver" ) @@ -102,6 +103,9 @@ type ( GetDLQReplicationMessages(ctx ctx.Context, taskInfos []*r.ReplicationTaskInfo) ([]*r.ReplicationTask, error) QueryWorkflow(ctx ctx.Context, request *h.QueryWorkflowRequest) (*h.QueryWorkflowResponse, error) ReapplyEvents(ctx ctx.Context, domainUUID string, workflowID string, runID string, events []*workflow.HistoryEvent) error + ReadDLQMessages(ctx ctx.Context, messagesRequest *r.ReadDLQMessagesRequest) (*r.ReadDLQMessagesResponse, error) + PurgeDLQMessages(ctx ctx.Context, messagesRequest *r.PurgeDLQMessagesRequest) error + 
MergeDLQMessages(ctx ctx.Context, messagesRequest *r.MergeDLQMessagesRequest) (*r.MergeDLQMessagesResponse, error) RefreshWorkflowTasks(ctx ctx.Context, domainUUID string, execution workflow.WorkflowExecution) error NotifyNewHistoryEvent(event *historyEventNotification) @@ -141,6 +145,7 @@ type ( matchingClient matching.Client rawMatchingClient matching.Client clientChecker client.VersionChecker + replicationDLQHandler replicationDLQHandler } ) @@ -285,19 +290,50 @@ func NewEngineWithShardContext( ) historyEngImpl.decisionHandler = newDecisionHandler(historyEngImpl) + nDCHistoryResender := xdc.NewNDCHistoryResender( + shard.GetDomainCache(), + shard.GetService().GetClientBean().GetRemoteAdminClient(currentClusterName), + func(ctx context.Context, request *h.ReplicateEventsV2Request) error { + return shard.GetService().GetHistoryClient().ReplicateEventsV2(ctx, request) + }, + shard.GetService().GetPayloadSerializer(), + shard.GetLogger(), + ) + historyRereplicator := xdc.NewHistoryRereplicator( + currentClusterName, + shard.GetDomainCache(), + shard.GetService().GetClientBean().GetRemoteAdminClient(currentClusterName), + func(ctx context.Context, request *h.ReplicateRawEventsRequest) error { + return shard.GetService().GetHistoryClient().ReplicateRawEvents(ctx, request) + }, + shard.GetService().GetPayloadSerializer(), + replicationTimeout, + shard.GetLogger(), + ) + replicationTaskExecutor := newReplicationTaskExecutor( + currentClusterName, + shard.GetDomainCache(), + nDCHistoryResender, + historyRereplicator, + historyEngImpl, + shard.GetMetricsClient(), + shard.GetLogger(), + ) var replicationTaskProcessors []ReplicationTaskProcessor for _, replicationTaskFetcher := range replicationTaskFetchers.GetFetchers() { replicationTaskProcessor := NewReplicationTaskProcessor( shard, historyEngImpl, config, - historyClient, shard.GetMetricsClient(), replicationTaskFetcher, + replicationTaskExecutor, ) replicationTaskProcessors = append(replicationTaskProcessors, replicationTaskProcessor) } historyEngImpl.replicationTaskProcessors = replicationTaskProcessors + replicationMessageHandler := newReplicationDLQHandler(shard, replicationTaskExecutor) + historyEngImpl.replicationDLQHandler = replicationMessageHandler shard.SetEngine(historyEngImpl) return historyEngImpl @@ -2900,6 +2936,59 @@ func (e *historyEngineImpl) ReapplyEvents( }) } +func (e *historyEngineImpl) ReadDLQMessages( + ctx context.Context, + request *r.ReadDLQMessagesRequest, +) (*r.ReadDLQMessagesResponse, error) { + + tasks, token, err := e.replicationDLQHandler.readMessages( + ctx, + request.GetSourceCluster(), + request.GetInclusiveEndMessageID(), + int(request.GetMaximumPageSize()), + request.GetNextPageToken(), + ) + if err != nil { + return nil, err + } + return &r.ReadDLQMessagesResponse{ + Type: request.GetType().Ptr(), + ReplicationTasks: tasks, + NextPageToken: token, + }, nil +} + +func (e *historyEngineImpl) PurgeDLQMessages( + ctx context.Context, + request *r.PurgeDLQMessagesRequest, +) error { + + return e.replicationDLQHandler.purgeMessages( + request.GetSourceCluster(), + request.GetInclusiveEndMessageID(), + ) +} + +func (e *historyEngineImpl) MergeDLQMessages( + ctx context.Context, + request *r.MergeDLQMessagesRequest, +) (*r.MergeDLQMessagesResponse, error) { + + token, err := e.replicationDLQHandler.mergeMessages( + ctx, + request.GetSourceCluster(), + request.GetInclusiveEndMessageID(), + int(request.GetMaximumPageSize()), + request.GetNextPageToken(), + ) + if err != nil { + return nil, err + } + return 
&r.MergeDLQMessagesResponse{ + NextPageToken: token, + }, nil +} + func (e *historyEngineImpl) RefreshWorkflowTasks( ctx ctx.Context, domainUUID string, diff --git a/service/history/historyEngine_mock.go b/service/history/historyEngine_mock.go index 981df6fa5af..2141fced193 100644 --- a/service/history/historyEngine_mock.go +++ b/service/history/historyEngine_mock.go @@ -535,6 +535,29 @@ func (mr *MockEngineMockRecorder) ReapplyEvents(ctx, domainUUID, workflowID, run return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReapplyEvents", reflect.TypeOf((*MockEngine)(nil).ReapplyEvents), ctx, domainUUID, workflowID, runID, events) } +// ReadDLQMessages mocks base method +func (m *MockEngine) ReadDLQMessages(ctx context.Context, messagesRequest *replicator.ReadDLQMessagesRequest) (*replicator.ReadDLQMessagesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ReadDLQMessages", ctx, messagesRequest) + ret0, _ := ret[0].(*replicator.ReadDLQMessagesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ReadDLQMessages indicates an expected call of ReadDLQMessages +func (mr *MockEngineMockRecorder) ReadDLQMessages(ctx, messagesRequest interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDLQMessages", reflect.TypeOf((*MockEngine)(nil).ReadDLQMessages), ctx, messagesRequest) +} + +// PurgeDLQMessages mocks base method +func (m *MockEngine) PurgeDLQMessages(ctx context.Context, messagesRequest *replicator.PurgeDLQMessagesRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "PurgeDLQMessages", ctx, messagesRequest) + ret0, _ := ret[0].(error) + return ret0 +} + // RefreshWorkflowTasks mocks base method func (m *MockEngine) RefreshWorkflowTasks(ctx context.Context, domainUUID string, execution shared.WorkflowExecution) error { m.ctrl.T.Helper() @@ -543,6 +566,27 @@ func (m *MockEngine) RefreshWorkflowTasks(ctx context.Context, domainUUID string return ret0 } +// PurgeDLQMessages indicates an expected call of PurgeDLQMessages +func (mr *MockEngineMockRecorder) PurgeDLQMessages(ctx, messagesRequest interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "PurgeDLQMessages", reflect.TypeOf((*MockEngine)(nil).PurgeDLQMessages), ctx, messagesRequest) +} + +// MergeDLQMessages mocks base method +func (m *MockEngine) MergeDLQMessages(ctx context.Context, messagesRequest *replicator.MergeDLQMessagesRequest) (*replicator.MergeDLQMessagesResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "MergeDLQMessages", ctx, messagesRequest) + ret0, _ := ret[0].(*replicator.MergeDLQMessagesResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// MergeDLQMessages indicates an expected call of MergeDLQMessages +func (mr *MockEngineMockRecorder) MergeDLQMessages(ctx, messagesRequest interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "MergeDLQMessages", reflect.TypeOf((*MockEngine)(nil).MergeDLQMessages), ctx, messagesRequest) +} + // RefreshWorkflowTasks indicates an expected call of RefreshWorkflowTasks func (mr *MockEngineMockRecorder) RefreshWorkflowTasks(ctx, domainUUID, execution interface{}) *gomock.Call { mr.mock.ctrl.T.Helper() diff --git a/service/history/replicationDLQHandler.go b/service/history/replicationDLQHandler.go new file mode 100644 index 00000000000..075512f930c --- /dev/null +++ b/service/history/replicationDLQHandler.go @@ -0,0 +1,216 @@ +// Copyright (c) 2020 Uber Technologies, 
Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination replicationMessageHandler_mock.go -self_package github.com/uber/cadence/service/history + +package history + +import ( + "context" + + "github.com/uber/cadence/.gen/go/replicator" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/log/tag" + "github.com/uber/cadence/common/persistence" +) + +type ( + // replicationDLQHandler is the interface handles replication DLQ messages + replicationDLQHandler interface { + readMessages( + ctx context.Context, + sourceCluster string, + lastMessageID int64, + pageSize int, + pageToken []byte, + ) ([]*replicator.ReplicationTask, []byte, error) + purgeMessages( + sourceCluster string, + lastMessageID int64, + ) error + mergeMessages( + ctx context.Context, + sourceCluster string, + lastMessageID int64, + pageSize int, + pageToken []byte, + ) ([]byte, error) + } + + replicationDLQHandlerImpl struct { + replicationTaskExecutor replicationTaskExecutor + shard ShardContext + logger log.Logger + } +) + +func newReplicationDLQHandler( + shard ShardContext, + replicationTaskExecutor replicationTaskExecutor, +) replicationDLQHandler { + + return &replicationDLQHandlerImpl{ + shard: shard, + replicationTaskExecutor: replicationTaskExecutor, + logger: shard.GetLogger(), + } +} + +func (r *replicationDLQHandlerImpl) readMessages( + ctx context.Context, + sourceCluster string, + lastMessageID int64, + pageSize int, + pageToken []byte, +) ([]*replicator.ReplicationTask, []byte, error) { + + tasks, _, token, err := r.readMessagesWithAckLevel( + ctx, + sourceCluster, + lastMessageID, + pageSize, + pageToken, + ) + return tasks, token, err +} + +func (r *replicationDLQHandlerImpl) readMessagesWithAckLevel( + ctx context.Context, + sourceCluster string, + lastMessageID int64, + pageSize int, + pageToken []byte, +) ([]*replicator.ReplicationTask, int64, []byte, error) { + + ackLevel := r.shard.GetReplicatorDLQAckLevel(sourceCluster) + resp, err := r.shard.GetExecutionManager().GetReplicationTasksFromDLQ(&persistence.GetReplicationTasksFromDLQRequest{ + SourceClusterName: sourceCluster, + GetReplicationTasksRequest: persistence.GetReplicationTasksRequest{ + ReadLevel: ackLevel, + MaxReadLevel: lastMessageID, + BatchSize: pageSize, + NextPageToken: pageToken, + }, + }) + if err != nil { + return nil, ackLevel, nil, err + } + + 
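+	// The DLQ rows store only replication task metadata; the full replication tasks are fetched from the source cluster's admin service below before being returned to the caller.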
remoteAdminClient := r.shard.GetService().GetClientBean().GetRemoteAdminClient(sourceCluster) + taskInfo := make([]*replicator.ReplicationTaskInfo, 0, len(resp.Tasks)) + for _, task := range resp.Tasks { + taskInfo = append(taskInfo, &replicator.ReplicationTaskInfo{ + DomainID: common.StringPtr(task.GetDomainID()), + WorkflowID: common.StringPtr(task.GetWorkflowID()), + RunID: common.StringPtr(task.GetRunID()), + TaskType: common.Int16Ptr(int16(task.GetTaskType())), + TaskID: common.Int64Ptr(task.GetTaskID()), + Version: common.Int64Ptr(task.GetVersion()), + FirstEventID: common.Int64Ptr(task.FirstEventID), + NextEventID: common.Int64Ptr(task.NextEventID), + ScheduledID: common.Int64Ptr(task.ScheduledID), + }) + } + dlqResponse, err := remoteAdminClient.GetDLQReplicationMessages( + ctx, + &replicator.GetDLQReplicationMessagesRequest{ + TaskInfos: taskInfo, + }, + ) + if err != nil { + return nil, ackLevel, nil, err + } + return dlqResponse.ReplicationTasks, ackLevel, resp.NextPageToken, nil +} + +func (r *replicationDLQHandlerImpl) purgeMessages( + sourceCluster string, + lastMessageID int64, +) error { + + ackLevel := r.shard.GetReplicatorDLQAckLevel(sourceCluster) + err := r.shard.GetExecutionManager().RangeDeleteReplicationTaskFromDLQ( + &persistence.RangeDeleteReplicationTaskFromDLQRequest{ + SourceClusterName: sourceCluster, + ExclusiveBeginTaskID: ackLevel, + InclusiveEndTaskID: lastMessageID, + }, + ) + if err != nil { + return err + } + + if err = r.shard.UpdateReplicatorDLQAckLevel( + sourceCluster, + lastMessageID, + ); err != nil { + r.logger.Error("Failed to purge history replication message", tag.Error(err)) + // The update ack level should not block the call. Ignore the error. + } + return nil +} + +func (r *replicationDLQHandlerImpl) mergeMessages( + ctx context.Context, + sourceCluster string, + lastMessageID int64, + pageSize int, + pageToken []byte, +) ([]byte, error) { + + tasks, ackLevel, token, err := r.readMessagesWithAckLevel( + ctx, + sourceCluster, + lastMessageID, + pageSize, + pageToken, + ) + if err != nil { + return nil, err + } + + for _, task := range tasks { + if _, err := r.replicationTaskExecutor.execute( + sourceCluster, + task, + true, + ); err != nil { + return nil, err + } + } + + err = r.shard.GetExecutionManager().RangeDeleteReplicationTaskFromDLQ( + &persistence.RangeDeleteReplicationTaskFromDLQRequest{ + SourceClusterName: sourceCluster, + ExclusiveBeginTaskID: ackLevel, + InclusiveEndTaskID: lastMessageID, + }, + ) + if err != nil { + return nil, err + } + + if err = r.shard.UpdateReplicatorDLQAckLevel( + sourceCluster, + lastMessageID, + ); err != nil { + r.logger.Error("Failed to merge history replication message", tag.Error(err)) + // The update ack level should not block the call. Ignore the error. + } + return token, nil +} diff --git a/service/history/replicationDLQHandler_test.go b/service/history/replicationDLQHandler_test.go new file mode 100644 index 00000000000..183723f663b --- /dev/null +++ b/service/history/replicationDLQHandler_test.go @@ -0,0 +1,228 @@ +// Copyright (c) 2020 Uber Technologies, Inc.
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package history + +import ( + "context" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + + "github.com/uber/cadence/.gen/go/admin/adminservicetest" + "github.com/uber/cadence/.gen/go/replicator" + "github.com/uber/cadence/client" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/cluster" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/metrics" + "github.com/uber/cadence/common/mocks" + "github.com/uber/cadence/common/persistence" + "github.com/uber/cadence/common/resource" +) + +type ( + replicationDLQHandlerSuite struct { + suite.Suite + *require.Assertions + controller *gomock.Controller + + mockResource *resource.Test + mockShard ShardContext + config *Config + mockClientBean *client.MockBean + adminClient *adminservicetest.MockClient + clusterMetadata *cluster.MockMetadata + executionManager *mocks.ExecutionManager + shardManager *mocks.ShardManager + replicatorTaskExecutor *MockreplicationTaskExecutor + + replicationMessageHandler *replicationDLQHandlerImpl + } +) + +func TestReplicationMessageHandlerSuite(t *testing.T) { + s := new(replicationDLQHandlerSuite) + suite.Run(t, s) +} + +func (s *replicationDLQHandlerSuite) SetupSuite() { + +} + +func (s *replicationDLQHandlerSuite) TearDownSuite() { + +} + +func (s *replicationDLQHandlerSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.controller = gomock.NewController(s.T()) + + s.mockResource = resource.NewTest(s.controller, metrics.History) + s.mockClientBean = s.mockResource.ClientBean + s.adminClient = s.mockResource.RemoteAdminClient + s.clusterMetadata = s.mockResource.ClusterMetadata + s.executionManager = s.mockResource.ExecutionMgr + s.shardManager = s.mockResource.ShardMgr + logger := log.NewNoop() + s.mockShard = &shardContextImpl{ + shardID: 0, + Resource: s.mockResource, + shardInfo: &persistence.ShardInfo{ + ShardID: 0, + RangeID: 1, + ReplicationDLQAckLevel: map[string]int64{"test": -1}}, + transferSequenceNumber: 1, + maxTransferSequenceNumber: 100000, + closeCh: make(chan int, 100), + config: NewDynamicConfigForTest(), + logger: logger, + remoteClusterCurrentTime: make(map[string]time.Time), + executionManager: s.executionManager, + } + s.config = NewDynamicConfigForTest() + 
s.clusterMetadata.EXPECT().GetCurrentClusterName().Return("active").AnyTimes() + s.replicatorTaskExecutor = NewMockreplicationTaskExecutor(s.controller) + + s.replicationMessageHandler = newReplicationDLQHandler( + s.mockShard, + s.replicatorTaskExecutor, + ).(*replicationDLQHandlerImpl) +} + +func (s *replicationDLQHandlerSuite) TearDownTest() { + s.controller.Finish() + s.mockResource.Finish(s.T()) +} + +func (s *replicationDLQHandlerSuite) TestReadMessages_OK() { + ctx := context.Background() + sourceCluster := "test" + lastMessageID := int64(1) + pageSize := 1 + pageToken := []byte{} + + resp := &persistence.GetReplicationTasksFromDLQResponse{ + Tasks: []*persistence.ReplicationTaskInfo{ + { + DomainID: uuid.New(), + WorkflowID: uuid.New(), + RunID: uuid.New(), + TaskType: 0, + TaskID: 1, + }, + }, + } + s.executionManager.On("GetReplicationTasksFromDLQ", &persistence.GetReplicationTasksFromDLQRequest{ + SourceClusterName: sourceCluster, + GetReplicationTasksRequest: persistence.GetReplicationTasksRequest{ + ReadLevel: -1, + MaxReadLevel: lastMessageID, + BatchSize: pageSize, + NextPageToken: pageToken, + }, + }).Return(resp, nil).Times(1) + + s.mockClientBean.EXPECT().GetRemoteAdminClient(sourceCluster).Return(s.adminClient).AnyTimes() + s.adminClient.EXPECT(). + GetDLQReplicationMessages(ctx, gomock.Any()). + Return(&replicator.GetDLQReplicationMessagesResponse{}, nil) + tasks, token, err := s.replicationMessageHandler.readMessages(ctx, sourceCluster, lastMessageID, pageSize, pageToken) + s.NoError(err) + s.Nil(token) + s.Nil(tasks) +} + +func (s *replicationDLQHandlerSuite) TestPurgeMessages_OK() { + sourceCluster := "test" + lastMessageID := int64(1) + + s.executionManager.On("RangeDeleteReplicationTaskFromDLQ", + &persistence.RangeDeleteReplicationTaskFromDLQRequest{ + SourceClusterName: sourceCluster, + ExclusiveBeginTaskID: -1, + InclusiveEndTaskID: lastMessageID, + }).Return(nil).Times(1) + + s.shardManager.On("UpdateShard", mock.Anything).Return(nil) + err := s.replicationMessageHandler.purgeMessages(sourceCluster, lastMessageID) + s.NoError(err) +} + +func (s *replicationDLQHandlerSuite) TestMergeMessages_OK() { + ctx := context.Background() + sourceCluster := "test" + lastMessageID := int64(1) + pageSize := 1 + pageToken := []byte{} + + resp := &persistence.GetReplicationTasksFromDLQResponse{ + Tasks: []*persistence.ReplicationTaskInfo{ + { + DomainID: uuid.New(), + WorkflowID: uuid.New(), + RunID: uuid.New(), + TaskType: 0, + TaskID: 1, + }, + }, + } + s.executionManager.On("GetReplicationTasksFromDLQ", &persistence.GetReplicationTasksFromDLQRequest{ + SourceClusterName: sourceCluster, + GetReplicationTasksRequest: persistence.GetReplicationTasksRequest{ + ReadLevel: -1, + MaxReadLevel: lastMessageID, + BatchSize: pageSize, + NextPageToken: pageToken, + }, + }).Return(resp, nil).Times(1) + + s.mockClientBean.EXPECT().GetRemoteAdminClient(sourceCluster).Return(s.adminClient).AnyTimes() + replicationTask := &replicator.ReplicationTask{ + TaskType: replicator.ReplicationTaskTypeHistory.Ptr(), + SourceTaskId: common.Int64Ptr(lastMessageID), + HistoryTaskAttributes: &replicator.HistoryTaskAttributes{}, + } + s.adminClient.EXPECT(). + GetDLQReplicationMessages(ctx, gomock.Any()). 
+ Return(&replicator.GetDLQReplicationMessagesResponse{ + ReplicationTasks: []*replicator.ReplicationTask{ + replicationTask, + }, + }, nil) + s.replicatorTaskExecutor.EXPECT().execute(sourceCluster, replicationTask, true).Return(0, nil).Times(1) + s.executionManager.On("RangeDeleteReplicationTaskFromDLQ", + &persistence.RangeDeleteReplicationTaskFromDLQRequest{ + SourceClusterName: sourceCluster, + ExclusiveBeginTaskID: -1, + InclusiveEndTaskID: lastMessageID, + }).Return(nil).Times(1) + + s.shardManager.On("UpdateShard", mock.Anything).Return(nil) + + token, err := s.replicationMessageHandler.mergeMessages(ctx, sourceCluster, lastMessageID, pageSize, pageToken) + s.NoError(err) + s.Nil(token) +} diff --git a/service/history/replicationTaskExecutor.go b/service/history/replicationTaskExecutor.go new file mode 100644 index 00000000000..a7cb6466187 --- /dev/null +++ b/service/history/replicationTaskExecutor.go @@ -0,0 +1,343 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
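The replicationTaskExecutor introduced below centralizes per-task-type handling so that the DLQ handler above and the replication task processor share a single apply path. A minimal wiring sketch, assuming a shard context, history engine, config, metrics client, task fetcher, NDC history resender, and history rereplicator are already in scope (only the constructors and shard accessors come from this patch; the other names are the caller's):

    // Hypothetical wiring of the shared executor into both consumers.
    taskExecutor := newReplicationTaskExecutor(
        shard.GetClusterMetadata().GetCurrentClusterName(),
        shard.GetDomainCache(),
        nDCHistoryResender,
        historyRereplicator,
        historyEngine,
        metricsClient,
        shard.GetLogger(),
    )
    dlqHandler := newReplicationDLQHandler(shard, taskExecutor)
    taskProcessor := NewReplicationTaskProcessor(
        shard,
        historyEngine,
        config,
        metricsClient,
        replicationTaskFetcher,
        taskExecutor,
    )
    _, _ = dlqHandler, taskProcessor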
+ +//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination replicationTaskExecutor_mock.go -self_package github.com/uber/cadence/service/history + +package history + +import ( + "context" + + "github.com/uber/cadence/.gen/go/history" + r "github.com/uber/cadence/.gen/go/replicator" + "github.com/uber/cadence/.gen/go/shared" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/cache" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/log/tag" + "github.com/uber/cadence/common/metrics" + "github.com/uber/cadence/common/xdc" +) + +type ( + replicationTaskExecutor interface { + execute(sourceCluster string, replicationTask *r.ReplicationTask, forceApply bool) (int, error) + } + + replicationTaskExecutorImpl struct { + currentCluster string + domainCache cache.DomainCache + nDCHistoryResender xdc.NDCHistoryResender + historyRereplicator xdc.HistoryRereplicator + historyEngine Engine + + metricsClient metrics.Client + logger log.Logger + } +) + +// newReplicationTaskExecutor creates a replication task executor +// The executor is used by 1) the DLQ replication task handler and 2) the history replication task processor +func newReplicationTaskExecutor( + currentCluster string, + domainCache cache.DomainCache, + nDCHistoryResender xdc.NDCHistoryResender, + historyRereplicator xdc.HistoryRereplicator, + historyEngine Engine, + metricsClient metrics.Client, + logger log.Logger, +) replicationTaskExecutor { + return &replicationTaskExecutorImpl{ + currentCluster: currentCluster, + domainCache: domainCache, + nDCHistoryResender: nDCHistoryResender, + historyRereplicator: historyRereplicator, + historyEngine: historyEngine, + metricsClient: metricsClient, + logger: logger, + } +} + +func (e *replicationTaskExecutorImpl) execute( + sourceCluster string, + replicationTask *r.ReplicationTask, + forceApply bool, +) (int, error) { + + var err error + var scope int + switch replicationTask.GetTaskType() { + case r.ReplicationTaskTypeSyncShardStatus: + // Shard status will be sent as part of the Replication message without kafka + scope = metrics.SyncShardTaskScope + case r.ReplicationTaskTypeSyncActivity: + scope = metrics.SyncActivityTaskScope + err = e.handleActivityTask(replicationTask, forceApply) + case r.ReplicationTaskTypeHistory: + scope = metrics.HistoryReplicationTaskScope + err = e.handleHistoryReplicationTask(sourceCluster, replicationTask, forceApply) + case r.ReplicationTaskTypeHistoryMetadata: + // Without kafka we should not have size limits so we don't necessarily need this in the new replication scheme.
+ scope = metrics.HistoryMetadataReplicationTaskScope + case r.ReplicationTaskTypeHistoryV2: + scope = metrics.HistoryReplicationV2TaskScope + err = e.handleHistoryReplicationTaskV2(replicationTask, forceApply) + default: + e.logger.Error("Unknown task type.") + scope = metrics.ReplicatorScope + err = ErrUnknownReplicationTask + } + + return scope, err +} + +func (e *replicationTaskExecutorImpl) handleActivityTask( + task *r.ReplicationTask, + forceApply bool, +) error { + + attr := task.SyncActivityTaskAttributes + doContinue, err := e.filterTask(attr.GetDomainId(), forceApply) + if err != nil || !doContinue { + return err + } + + request := &history.SyncActivityRequest{ + DomainId: attr.DomainId, + WorkflowId: attr.WorkflowId, + RunId: attr.RunId, + Version: attr.Version, + ScheduledId: attr.ScheduledId, + ScheduledTime: attr.ScheduledTime, + StartedId: attr.StartedId, + StartedTime: attr.StartedTime, + LastHeartbeatTime: attr.LastHeartbeatTime, + Details: attr.Details, + Attempt: attr.Attempt, + LastFailureReason: attr.LastFailureReason, + LastWorkerIdentity: attr.LastWorkerIdentity, + VersionHistory: attr.GetVersionHistory(), + } + ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout) + defer cancel() + err = e.historyEngine.SyncActivity(ctx, request) + // Handle resend error + retryV2Err, okV2 := e.convertRetryTaskV2Error(err) + //TODO: remove handling retry error v1 after 2DC deprecation + retryV1Err, okV1 := e.convertRetryTaskError(err) + + if !okV1 && !okV2 { + return err + } else if okV1 { + if retryV1Err.GetRunId() == "" { + return err + } + e.metricsClient.IncCounter(metrics.HistoryRereplicationByActivityReplicationScope, metrics.CadenceClientRequests) + stopwatch := e.metricsClient.StartTimer(metrics.HistoryRereplicationByActivityReplicationScope, metrics.CadenceClientLatency) + defer stopwatch.Stop() + + // this is the retry error + if resendErr := e.historyRereplicator.SendMultiWorkflowHistory( + attr.GetDomainId(), + attr.GetWorkflowId(), + retryV1Err.GetRunId(), + retryV1Err.GetNextEventId(), + attr.GetRunId(), + attr.GetScheduledId()+1, // the next event ID should be at activity schedule ID + 1 + ); resendErr != nil { + e.logger.Error("error resend history for sync activity", tag.Error(resendErr)) + // should return the replication error, not the resending error + return err + } + } else if okV2 { + e.metricsClient.IncCounter(metrics.HistoryRereplicationByActivityReplicationScope, metrics.CadenceClientRequests) + stopwatch := e.metricsClient.StartTimer(metrics.HistoryRereplicationByActivityReplicationScope, metrics.CadenceClientLatency) + defer stopwatch.Stop() + + if resendErr := e.nDCHistoryResender.SendSingleWorkflowHistory( + retryV2Err.GetDomainId(), + retryV2Err.GetWorkflowId(), + retryV2Err.GetRunId(), + retryV2Err.StartEventId, + retryV2Err.StartEventVersion, + retryV2Err.EndEventId, + retryV2Err.EndEventVersion, + ); resendErr != nil { + e.logger.Error("error resend history for sync activity", tag.Error(resendErr)) + // should return the replication error, not the resending error + return err + } + } + // should try again after back fill the history + return e.historyEngine.SyncActivity(ctx, request) +} + +//TODO: remove this part after 2DC deprecation +func (e *replicationTaskExecutorImpl) handleHistoryReplicationTask( + sourceCluster string, + task *r.ReplicationTask, + forceApply bool, +) error { + + attr := task.HistoryTaskAttributes + doContinue, err := e.filterTask(attr.GetDomainId(), forceApply) + if err != nil || !doContinue { + 
return err + } + + request := &history.ReplicateEventsRequest{ + SourceCluster: common.StringPtr(sourceCluster), + DomainUUID: attr.DomainId, + WorkflowExecution: &shared.WorkflowExecution{ + WorkflowId: attr.WorkflowId, + RunId: attr.RunId, + }, + FirstEventId: attr.FirstEventId, + NextEventId: attr.NextEventId, + Version: attr.Version, + ReplicationInfo: attr.ReplicationInfo, + History: attr.History, + NewRunHistory: attr.NewRunHistory, + ForceBufferEvents: common.BoolPtr(false), + ResetWorkflow: attr.ResetWorkflow, + NewRunNDC: attr.NewRunNDC, + } + ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout) + defer cancel() + + err = e.historyEngine.ReplicateEvents(ctx, request) + retryErr, ok := e.convertRetryTaskError(err) + if !ok || retryErr.GetRunId() == "" { + return err + } + + e.metricsClient.IncCounter(metrics.HistoryRereplicationByHistoryReplicationScope, metrics.CadenceClientRequests) + stopwatch := e.metricsClient.StartTimer(metrics.HistoryRereplicationByHistoryReplicationScope, metrics.CadenceClientLatency) + defer stopwatch.Stop() + + resendErr := e.historyRereplicator.SendMultiWorkflowHistory( + attr.GetDomainId(), + attr.GetWorkflowId(), + retryErr.GetRunId(), + retryErr.GetNextEventId(), + attr.GetRunId(), + attr.GetFirstEventId(), + ) + if resendErr != nil { + e.logger.Error("error resend history for history event", tag.Error(resendErr)) + // should return the replication error, not the resending error + return err + } + + return e.historyEngine.ReplicateEvents(ctx, request) +} + +func (e *replicationTaskExecutorImpl) handleHistoryReplicationTaskV2( + task *r.ReplicationTask, + forceApply bool, +) error { + + attr := task.HistoryTaskV2Attributes + doContinue, err := e.filterTask(attr.GetDomainId(), forceApply) + if err != nil || !doContinue { + return err + } + + request := &history.ReplicateEventsV2Request{ + DomainUUID: attr.DomainId, + WorkflowExecution: &shared.WorkflowExecution{ + WorkflowId: attr.WorkflowId, + RunId: attr.RunId, + }, + VersionHistoryItems: attr.VersionHistoryItems, + Events: attr.Events, + // new run events does not need version history since there is no prior events + NewRunEvents: attr.NewRunEvents, + } + ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout) + defer cancel() + + err = e.historyEngine.ReplicateEventsV2(ctx, request) + retryErr, ok := e.convertRetryTaskV2Error(err) + if !ok { + return err + } + e.metricsClient.IncCounter(metrics.HistoryRereplicationByHistoryReplicationScope, metrics.CadenceClientRequests) + stopwatch := e.metricsClient.StartTimer(metrics.HistoryRereplicationByHistoryReplicationScope, metrics.CadenceClientLatency) + defer stopwatch.Stop() + + if resendErr := e.nDCHistoryResender.SendSingleWorkflowHistory( + retryErr.GetDomainId(), + retryErr.GetWorkflowId(), + retryErr.GetRunId(), + retryErr.StartEventId, + retryErr.StartEventVersion, + retryErr.EndEventId, + retryErr.EndEventVersion, + ); resendErr != nil { + e.logger.Error("error resend history for history event v2", tag.Error(resendErr)) + // should return the replication error, not the resending error + return err + } + + return e.historyEngine.ReplicateEventsV2(ctx, request) +} + +func (e *replicationTaskExecutorImpl) filterTask( + domainID string, + forceApply bool, +) (bool, error) { + + if forceApply { + return true, nil + } + + domainEntry, err := e.domainCache.GetDomainByID(domainID) + if err != nil { + return false, err + } + + shouldProcessTask := false +FilterLoop: + for _, targetCluster := range 
domainEntry.GetReplicationConfig().Clusters { + if e.currentCluster == targetCluster.ClusterName { + shouldProcessTask = true + break FilterLoop + } + } + return shouldProcessTask, nil +} + +//TODO: remove this code after 2DC deprecation +func (e *replicationTaskExecutorImpl) convertRetryTaskError( + err error, +) (*shared.RetryTaskError, bool) { + + retError, ok := err.(*shared.RetryTaskError) + return retError, ok +} + +func (e *replicationTaskExecutorImpl) convertRetryTaskV2Error( + err error, +) (*shared.RetryTaskV2Error, bool) { + + retError, ok := err.(*shared.RetryTaskV2Error) + return retError, ok +} diff --git a/service/history/replicationTaskExecutor_mock.go b/service/history/replicationTaskExecutor_mock.go new file mode 100644 index 00000000000..44633e0e3bb --- /dev/null +++ b/service/history/replicationTaskExecutor_mock.go @@ -0,0 +1,74 @@ +// The MIT License (MIT) +// +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. +// + +// Code generated by MockGen. DO NOT EDIT. +// Source: replicationTaskExecutor.go + +// Package history is a generated GoMock package. 
+package history + +import ( + reflect "reflect" + + gomock "github.com/golang/mock/gomock" + + replicator "github.com/uber/cadence/.gen/go/replicator" +) + +// MockreplicationTaskExecutor is a mock of replicationTaskExecutor interface +type MockreplicationTaskExecutor struct { + ctrl *gomock.Controller + recorder *MockreplicationTaskExecutorMockRecorder +} + +// MockreplicationTaskExecutorMockRecorder is the mock recorder for MockreplicationTaskExecutor +type MockreplicationTaskExecutorMockRecorder struct { + mock *MockreplicationTaskExecutor +} + +// NewMockreplicationTaskExecutor creates a new mock instance +func NewMockreplicationTaskExecutor(ctrl *gomock.Controller) *MockreplicationTaskExecutor { + mock := &MockreplicationTaskExecutor{ctrl: ctrl} + mock.recorder = &MockreplicationTaskExecutorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use +func (m *MockreplicationTaskExecutor) EXPECT() *MockreplicationTaskExecutorMockRecorder { + return m.recorder +} + +// execute mocks base method +func (m *MockreplicationTaskExecutor) execute(sourceCluster string, replicationTask *replicator.ReplicationTask, forceApply bool) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "execute", sourceCluster, replicationTask, forceApply) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// execute indicates an expected call of execute +func (mr *MockreplicationTaskExecutorMockRecorder) execute(sourceCluster, replicationTask, forceApply interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "execute", reflect.TypeOf((*MockreplicationTaskExecutor)(nil).execute), sourceCluster, replicationTask, forceApply) +} diff --git a/service/history/replicationTaskExecutor_test.go b/service/history/replicationTaskExecutor_test.go new file mode 100644 index 00000000000..3278cae5cb5 --- /dev/null +++ b/service/history/replicationTaskExecutor_test.go @@ -0,0 +1,275 @@ +// Copyright (c) 2020 Uber Technologies, Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. 
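The tests below drive execute directly. The forceApply flag controls whether filterTask is bypassed: the DLQ merge path passes true so tasks are re-applied even when the domain is not replicated to the current cluster, while the task processor passes false and skips tasks for non-local domains. A minimal call sketch, assuming taskExecutor, sourceCluster, replicationTask, and logger are in scope:

    // The int result is the metrics scope for the task type; the processor feeds it to updateFailureMetric.
    scope, err := taskExecutor.execute(sourceCluster, replicationTask, false)
    if err != nil {
        logger.Error("replication task failed", tag.Error(err))
    }
    _ = scope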
+ +package history + +import ( + "fmt" + "testing" + "time" + + "github.com/golang/mock/gomock" + "github.com/pborman/uuid" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + "github.com/uber-go/tally" + + "github.com/uber/cadence/.gen/go/admin/adminservicetest" + "github.com/uber/cadence/.gen/go/history" + "github.com/uber/cadence/.gen/go/history/historyservicetest" + "github.com/uber/cadence/.gen/go/replicator" + "github.com/uber/cadence/.gen/go/shared" + "github.com/uber/cadence/client" + "github.com/uber/cadence/common" + "github.com/uber/cadence/common/cache" + "github.com/uber/cadence/common/cluster" + "github.com/uber/cadence/common/log" + "github.com/uber/cadence/common/metrics" + "github.com/uber/cadence/common/mocks" + "github.com/uber/cadence/common/persistence" + "github.com/uber/cadence/common/resource" + "github.com/uber/cadence/common/xdc" +) + +type ( + replicationTaskExecutorSuite struct { + suite.Suite + *require.Assertions + controller *gomock.Controller + + currentCluster string + mockResource *resource.Test + mockShard ShardContext + mockEngine *MockEngine + config *Config + historyClient *historyservicetest.MockClient + mockDomainCache *cache.MockDomainCache + mockClientBean *client.MockBean + adminClient *adminservicetest.MockClient + clusterMetadata *cluster.MockMetadata + executionManager *mocks.ExecutionManager + nDCHistoryResender *xdc.MockNDCHistoryResender + historyRereplicator *xdc.MockHistoryRereplicator + + replicationTaskHandler *replicationTaskExecutorImpl + } +) + +func TestReplicationTaskExecutorSuite(t *testing.T) { + s := new(replicationTaskExecutorSuite) + suite.Run(t, s) +} + +func (s *replicationTaskExecutorSuite) SetupSuite() { + +} + +func (s *replicationTaskExecutorSuite) TearDownSuite() { + +} + +func (s *replicationTaskExecutorSuite) SetupTest() { + s.Assertions = require.New(s.T()) + s.controller = gomock.NewController(s.T()) + s.currentCluster = "test" + + s.mockResource = resource.NewTest(s.controller, metrics.History) + s.mockDomainCache = s.mockResource.DomainCache + s.mockClientBean = s.mockResource.ClientBean + s.adminClient = s.mockResource.RemoteAdminClient + s.clusterMetadata = s.mockResource.ClusterMetadata + s.executionManager = s.mockResource.ExecutionMgr + s.nDCHistoryResender = xdc.NewMockNDCHistoryResender(s.controller) + s.historyRereplicator = &xdc.MockHistoryRereplicator{} + logger := log.NewNoop() + s.mockShard = &shardContextImpl{ + shardID: 0, + Resource: s.mockResource, + shardInfo: &persistence.ShardInfo{ + ShardID: 0, + RangeID: 1, + ReplicationAckLevel: 0, + ReplicationDLQAckLevel: map[string]int64{"test": -1}}, + transferSequenceNumber: 1, + maxTransferSequenceNumber: 100000, + closeCh: make(chan int, 100), + config: NewDynamicConfigForTest(), + logger: logger, + remoteClusterCurrentTime: make(map[string]time.Time), + executionManager: s.executionManager, + } + s.mockEngine = NewMockEngine(s.controller) + s.config = NewDynamicConfigForTest() + s.historyClient = historyservicetest.NewMockClient(s.controller) + metricsClient := metrics.NewClient(tally.NoopScope, metrics.History) + s.clusterMetadata.EXPECT().GetCurrentClusterName().Return("active").AnyTimes() + + s.replicationTaskHandler = newReplicationTaskExecutor( + s.currentCluster, + s.mockDomainCache, + s.nDCHistoryResender, + s.historyRereplicator, + s.mockEngine, + metricsClient, + s.mockShard.GetLogger(), + ).(*replicationTaskExecutorImpl) +} + +func (s *replicationTaskExecutorSuite) TearDownTest() { + s.controller.Finish() + 
s.mockResource.Finish(s.T()) +} + +func (s *replicationTaskExecutorSuite) TestConvertRetryTaskError_OK() { + err := &shared.RetryTaskError{} + _, ok := s.replicationTaskHandler.convertRetryTaskError(err) + s.True(ok) +} + +func (s *replicationTaskExecutorSuite) TestConvertRetryTaskError_NotOK() { + err := &shared.RetryTaskV2Error{} + _, ok := s.replicationTaskHandler.convertRetryTaskError(err) + s.False(ok) +} + +func (s *replicationTaskExecutorSuite) TestConvertRetryTaskV2Error_OK() { + err := &shared.RetryTaskV2Error{} + _, ok := s.replicationTaskHandler.convertRetryTaskV2Error(err) + s.True(ok) +} + +func (s *replicationTaskExecutorSuite) TestConvertRetryTaskV2Error_NotOK() { + err := &shared.RetryTaskError{} + _, ok := s.replicationTaskHandler.convertRetryTaskV2Error(err) + s.False(ok) +} + +func (s *replicationTaskExecutorSuite) TestFilterTask() { + domainID := uuid.New() + s.mockDomainCache.EXPECT(). + GetDomainByID(domainID). + Return(cache.NewGlobalDomainCacheEntryForTest( + nil, + nil, + &persistence.DomainReplicationConfig{ + Clusters: []*persistence.ClusterReplicationConfig{ + { + ClusterName: "test", + }, + }}, + 0, + s.clusterMetadata, + ), nil) + ok, err := s.replicationTaskHandler.filterTask(domainID, false) + s.NoError(err) + s.True(ok) +} + +func (s *replicationTaskExecutorSuite) TestFilterTask_Error() { + domainID := uuid.New() + s.mockDomainCache.EXPECT(). + GetDomainByID(domainID). + Return(nil, fmt.Errorf("test")) + ok, err := s.replicationTaskHandler.filterTask(domainID, false) + s.Error(err) + s.False(ok) +} + +func (s *replicationTaskExecutorSuite) TestFilterTask_EnforceApply() { + domainID := uuid.New() + ok, err := s.replicationTaskHandler.filterTask(domainID, true) + s.NoError(err) + s.True(ok) +} + +func (s *replicationTaskExecutorSuite) TestProcessTaskOnce_SyncActivityReplicationTask() { + domainID := uuid.New() + workflowID := uuid.New() + runID := uuid.New() + task := &replicator.ReplicationTask{ + TaskType: replicator.ReplicationTaskTypeSyncActivity.Ptr(), + SyncActivityTaskAttributes: &replicator.SyncActivityTaskAttributes{ + DomainId: common.StringPtr(domainID), + WorkflowId: common.StringPtr(workflowID), + RunId: common.StringPtr(runID), + }, + } + request := &history.SyncActivityRequest{ + DomainId: common.StringPtr(domainID), + WorkflowId: common.StringPtr(workflowID), + RunId: common.StringPtr(runID), + } + + s.mockEngine.EXPECT().SyncActivity(gomock.Any(), request).Return(nil).Times(1) + _, err := s.replicationTaskHandler.execute(s.currentCluster, task, true) + s.NoError(err) +} + +func (s *replicationTaskExecutorSuite) TestProcessTaskOnce_HistoryReplicationTask() { + domainID := uuid.New() + workflowID := uuid.New() + runID := uuid.New() + task := &replicator.ReplicationTask{ + TaskType: replicator.ReplicationTaskTypeHistory.Ptr(), + HistoryTaskAttributes: &replicator.HistoryTaskAttributes{ + DomainId: common.StringPtr(domainID), + WorkflowId: common.StringPtr(workflowID), + RunId: common.StringPtr(runID), + }, + } + request := &history.ReplicateEventsRequest{ + DomainUUID: common.StringPtr(domainID), + WorkflowExecution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr(workflowID), + RunId: common.StringPtr(runID), + }, + SourceCluster: common.StringPtr("test"), + ForceBufferEvents: common.BoolPtr(false), + } + + s.mockEngine.EXPECT().ReplicateEvents(gomock.Any(), request).Return(nil).Times(1) + _, err := s.replicationTaskHandler.execute(s.currentCluster, task, true) + s.NoError(err) +} + +func (s *replicationTaskExecutorSuite) 
TestProcess_HistoryV2ReplicationTask() { + domainID := uuid.New() + workflowID := uuid.New() + runID := uuid.New() + task := &replicator.ReplicationTask{ + TaskType: replicator.ReplicationTaskTypeHistoryV2.Ptr(), + HistoryTaskV2Attributes: &replicator.HistoryTaskV2Attributes{ + DomainId: common.StringPtr(domainID), + WorkflowId: common.StringPtr(workflowID), + RunId: common.StringPtr(runID), + }, + } + request := &history.ReplicateEventsV2Request{ + DomainUUID: common.StringPtr(domainID), + WorkflowExecution: &shared.WorkflowExecution{ + WorkflowId: common.StringPtr(workflowID), + RunId: common.StringPtr(runID), + }, + } + + s.mockEngine.EXPECT().ReplicateEventsV2(gomock.Any(), request).Return(nil).Times(1) + _, err := s.replicationTaskHandler.execute(s.currentCluster, task, true) + s.NoError(err) +} diff --git a/service/history/replicationTaskProcessor.go b/service/history/replicationTaskProcessor.go index 609c2ec0f9f..1fb7507ff32 100644 --- a/service/history/replicationTaskProcessor.go +++ b/service/history/replicationTaskProcessor.go @@ -34,15 +34,12 @@ import ( h "github.com/uber/cadence/.gen/go/history" r "github.com/uber/cadence/.gen/go/replicator" "github.com/uber/cadence/.gen/go/shared" - hc "github.com/uber/cadence/client/history" "github.com/uber/cadence/common" "github.com/uber/cadence/common/backoff" - "github.com/uber/cadence/common/cache" "github.com/uber/cadence/common/log" "github.com/uber/cadence/common/log/tag" "github.com/uber/cadence/common/metrics" "github.com/uber/cadence/common/persistence" - "github.com/uber/cadence/common/xdc" ) const ( @@ -61,18 +58,16 @@ var ( type ( // ReplicationTaskProcessorImpl is responsible for processing replication tasks for a shard. ReplicationTaskProcessorImpl struct { - currentCluster string - sourceCluster string - status int32 - shard ShardContext - historyEngine Engine - historySerializer persistence.PayloadSerializer - config *Config - domainCache cache.DomainCache - metricsClient metrics.Client - logger log.Logger - nDCHistoryResender xdc.NDCHistoryResender - historyRereplicator xdc.HistoryRereplicator + currentCluster string + sourceCluster string + status int32 + shard ShardContext + historyEngine Engine + historySerializer persistence.PayloadSerializer + config *Config + metricsClient metrics.Client + logger log.Logger + replicationTaskExecutor replicationTaskExecutor taskRetryPolicy backoff.RetryPolicy dlqRetryPolicy backoff.RetryPolicy @@ -102,9 +97,9 @@ func NewReplicationTaskProcessor( shard ShardContext, historyEngine Engine, config *Config, - historyClient hc.Client, metricsClient metrics.Client, replicationTaskFetcher ReplicationTaskFetcher, + replicationTaskExecutor replicationTaskExecutor, ) *ReplicationTaskProcessorImpl { taskRetryPolicy := backoff.NewExponentialRetryPolicy(config.ReplicationTaskProcessorErrorRetryWait()) taskRetryPolicy.SetBackoffCoefficient(taskErrorRetryBackoffCoefficient) @@ -117,47 +112,24 @@ func NewReplicationTaskProcessor( noTaskBackoffPolicy.SetBackoffCoefficient(1) noTaskBackoffPolicy.SetExpirationInterval(backoff.NoInterval) noTaskRetrier := backoff.NewRetrier(noTaskBackoffPolicy, backoff.SystemClock) - - nDCHistoryResender := xdc.NewNDCHistoryResender( - shard.GetDomainCache(), - shard.GetService().GetClientBean().GetRemoteAdminClient(replicationTaskFetcher.GetSourceCluster()), - func(ctx context.Context, request *h.ReplicateEventsV2Request) error { - return historyClient.ReplicateEventsV2(ctx, request) - }, - shard.GetService().GetPayloadSerializer(), - shard.GetLogger(), - ) - 
historyRereplicator := xdc.NewHistoryRereplicator( - replicationTaskFetcher.GetSourceCluster(), - shard.GetDomainCache(), - shard.GetService().GetClientBean().GetRemoteAdminClient(replicationTaskFetcher.GetSourceCluster()), - func(ctx context.Context, request *h.ReplicateRawEventsRequest) error { - return historyClient.ReplicateRawEvents(ctx, request) - }, - shard.GetService().GetPayloadSerializer(), - replicationTimeout, - shard.GetLogger(), - ) return &ReplicationTaskProcessorImpl{ - currentCluster: shard.GetClusterMetadata().GetCurrentClusterName(), - sourceCluster: replicationTaskFetcher.GetSourceCluster(), - status: common.DaemonStatusInitialized, - shard: shard, - historyEngine: historyEngine, - historySerializer: persistence.NewPayloadSerializer(), - config: config, - domainCache: shard.GetDomainCache(), - metricsClient: metricsClient, - logger: shard.GetLogger(), - nDCHistoryResender: nDCHistoryResender, - historyRereplicator: historyRereplicator, - taskRetryPolicy: taskRetryPolicy, - noTaskRetrier: noTaskRetrier, - requestChan: replicationTaskFetcher.GetRequestChan(), - syncShardChan: make(chan *r.SyncShardStatus), - done: make(chan struct{}), - lastProcessedMessageID: emptyMessageID, - lastRetrievedMessageID: emptyMessageID, + currentCluster: shard.GetClusterMetadata().GetCurrentClusterName(), + sourceCluster: replicationTaskFetcher.GetSourceCluster(), + status: common.DaemonStatusInitialized, + shard: shard, + historyEngine: historyEngine, + historySerializer: persistence.NewPayloadSerializer(), + config: config, + metricsClient: metricsClient, + logger: shard.GetLogger(), + replicationTaskExecutor: replicationTaskExecutor, + taskRetryPolicy: taskRetryPolicy, + noTaskRetrier: noTaskRetrier, + requestChan: replicationTaskFetcher.GetRequestChan(), + syncShardChan: make(chan *r.SyncShardStatus), + done: make(chan struct{}), + lastProcessedMessageID: emptyMessageID, + lastRetrievedMessageID: emptyMessageID, } } @@ -382,30 +354,10 @@ func (p *ReplicationTaskProcessorImpl) processSingleTask(replicationTask *r.Repl } func (p *ReplicationTaskProcessorImpl) processTaskOnce(replicationTask *r.ReplicationTask) error { - var err error - var scope int - switch replicationTask.GetTaskType() { - case r.ReplicationTaskTypeDomain: - // Domain replication task should be handled in worker (domainReplicationMessageProcessor) - panic("task type not supported") - case r.ReplicationTaskTypeSyncShardStatus: - // Shard status will be sent as part of the Replication message without kafka - case r.ReplicationTaskTypeSyncActivity: - scope = metrics.SyncActivityTaskScope - err = p.handleActivityTask(replicationTask) - case r.ReplicationTaskTypeHistory: - scope = metrics.HistoryReplicationTaskScope - err = p.handleHistoryReplicationTask(replicationTask) - case r.ReplicationTaskTypeHistoryMetadata: - // Without kafka we should not have size limits so we don't necessary need this in the new replication scheme. 
- case r.ReplicationTaskTypeHistoryV2: - scope = metrics.HistoryReplicationV2TaskScope - err = p.handleHistoryReplicationTaskV2(replicationTask) - default: - p.logger.Error("Unknown task type.") - scope = metrics.ReplicatorScope - err = ErrUnknownReplicationTask - } + scope, err := p.replicationTaskExecutor.execute( + p.sourceCluster, + replicationTask, + false) if err != nil { p.updateFailureMetric(scope, err) @@ -569,228 +521,3 @@ func (p *ReplicationTaskProcessorImpl) updateFailureMetric(scope int, err error) } } } - -func (p *ReplicationTaskProcessorImpl) handleActivityTask( - task *r.ReplicationTask, -) error { - - attr := task.SyncActivityTaskAttributes - doContinue, err := p.filterTask(attr.GetDomainId()) - if err != nil || !doContinue { - return err - } - - request := &h.SyncActivityRequest{ - DomainId: attr.DomainId, - WorkflowId: attr.WorkflowId, - RunId: attr.RunId, - Version: attr.Version, - ScheduledId: attr.ScheduledId, - ScheduledTime: attr.ScheduledTime, - StartedId: attr.StartedId, - StartedTime: attr.StartedTime, - LastHeartbeatTime: attr.LastHeartbeatTime, - Details: attr.Details, - Attempt: attr.Attempt, - LastFailureReason: attr.LastFailureReason, - LastWorkerIdentity: attr.LastWorkerIdentity, - VersionHistory: attr.GetVersionHistory(), - } - ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout) - defer cancel() - err = p.historyEngine.SyncActivity(ctx, request) - // Handle resend error - retryV2Err, okV2 := p.convertRetryTaskV2Error(err) - //TODO: remove handling retry error v1 after 2DC deprecation - retryV1Err, okV1 := p.convertRetryTaskError(err) - - if !okV1 && !okV2 { - return err - } else if okV1 { - if retryV1Err.GetRunId() == "" { - return err - } - p.metricsClient.IncCounter(metrics.HistoryRereplicationByActivityReplicationScope, metrics.CadenceClientRequests) - stopwatch := p.metricsClient.StartTimer(metrics.HistoryRereplicationByActivityReplicationScope, metrics.CadenceClientLatency) - defer stopwatch.Stop() - - // this is the retry error - if resendErr := p.historyRereplicator.SendMultiWorkflowHistory( - attr.GetDomainId(), - attr.GetWorkflowId(), - retryV1Err.GetRunId(), - retryV1Err.GetNextEventId(), - attr.GetRunId(), - attr.GetScheduledId()+1, // the next event ID should be at activity schedule ID + 1 - ); resendErr != nil { - p.logger.Error("error resend history for sync activity", tag.Error(resendErr)) - // should return the replication error, not the resending error - return err - } - } else if okV2 { - p.metricsClient.IncCounter(metrics.HistoryRereplicationByActivityReplicationScope, metrics.CadenceClientRequests) - stopwatch := p.metricsClient.StartTimer(metrics.HistoryRereplicationByActivityReplicationScope, metrics.CadenceClientLatency) - defer stopwatch.Stop() - - if resendErr := p.nDCHistoryResender.SendSingleWorkflowHistory( - retryV2Err.GetDomainId(), - retryV2Err.GetWorkflowId(), - retryV2Err.GetRunId(), - retryV2Err.StartEventId, - retryV2Err.StartEventVersion, - retryV2Err.EndEventId, - retryV2Err.EndEventVersion, - ); resendErr != nil { - p.logger.Error("error resend history for sync activity", tag.Error(resendErr)) - // should return the replication error, not the resending error - return err - } - } - // should try again after back fill the history - return p.historyEngine.SyncActivity(ctx, request) -} - -//TODO: remove this part after 2DC deprecation -func (p *ReplicationTaskProcessorImpl) handleHistoryReplicationTask( - task *r.ReplicationTask, -) error { - - attr := task.HistoryTaskAttributes - doContinue, err 
:= p.filterTask(attr.GetDomainId()) - if err != nil || !doContinue { - return err - } - - request := &h.ReplicateEventsRequest{ - SourceCluster: common.StringPtr(p.sourceCluster), - DomainUUID: attr.DomainId, - WorkflowExecution: &shared.WorkflowExecution{ - WorkflowId: attr.WorkflowId, - RunId: attr.RunId, - }, - FirstEventId: attr.FirstEventId, - NextEventId: attr.NextEventId, - Version: attr.Version, - ReplicationInfo: attr.ReplicationInfo, - History: attr.History, - NewRunHistory: attr.NewRunHistory, - ForceBufferEvents: common.BoolPtr(false), - ResetWorkflow: attr.ResetWorkflow, - NewRunNDC: attr.NewRunNDC, - } - ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout) - defer cancel() - - err = p.historyEngine.ReplicateEvents(ctx, request) - retryErr, ok := p.convertRetryTaskError(err) - if !ok || retryErr.GetRunId() == "" { - return err - } - - p.metricsClient.IncCounter(metrics.HistoryRereplicationByHistoryReplicationScope, metrics.CadenceClientRequests) - stopwatch := p.metricsClient.StartTimer(metrics.HistoryRereplicationByHistoryReplicationScope, metrics.CadenceClientLatency) - defer stopwatch.Stop() - - resendErr := p.historyRereplicator.SendMultiWorkflowHistory( - attr.GetDomainId(), - attr.GetWorkflowId(), - retryErr.GetRunId(), - retryErr.GetNextEventId(), - attr.GetRunId(), - attr.GetFirstEventId(), - ) - if resendErr != nil { - p.logger.Error("error resend history for history event", tag.Error(resendErr)) - // should return the replication error, not the resending error - return err - } - - return p.historyEngine.ReplicateEvents(ctx, request) -} - -func (p *ReplicationTaskProcessorImpl) handleHistoryReplicationTaskV2( - task *r.ReplicationTask, -) error { - - attr := task.HistoryTaskV2Attributes - doContinue, err := p.filterTask(attr.GetDomainId()) - if err != nil || !doContinue { - return err - } - - request := &h.ReplicateEventsV2Request{ - DomainUUID: attr.DomainId, - WorkflowExecution: &shared.WorkflowExecution{ - WorkflowId: attr.WorkflowId, - RunId: attr.RunId, - }, - VersionHistoryItems: attr.VersionHistoryItems, - Events: attr.Events, - // new run events does not need version history since there is no prior events - NewRunEvents: attr.NewRunEvents, - } - ctx, cancel := context.WithTimeout(context.Background(), replicationTimeout) - defer cancel() - - err = p.historyEngine.ReplicateEventsV2(ctx, request) - retryErr, ok := p.convertRetryTaskV2Error(err) - if !ok { - return err - } - p.metricsClient.IncCounter(metrics.HistoryRereplicationByHistoryReplicationScope, metrics.CadenceClientRequests) - stopwatch := p.metricsClient.StartTimer(metrics.HistoryRereplicationByHistoryReplicationScope, metrics.CadenceClientLatency) - defer stopwatch.Stop() - - if resendErr := p.nDCHistoryResender.SendSingleWorkflowHistory( - retryErr.GetDomainId(), - retryErr.GetWorkflowId(), - retryErr.GetRunId(), - retryErr.StartEventId, - retryErr.StartEventVersion, - retryErr.EndEventId, - retryErr.EndEventVersion, - ); resendErr != nil { - p.logger.Error("error resend history for history event v2", tag.Error(resendErr)) - // should return the replication error, not the resending error - return err - } - - return p.historyEngine.ReplicateEventsV2(ctx, request) -} - -func (p *ReplicationTaskProcessorImpl) filterTask( - domainID string, -) (bool, error) { - - domainEntry, err := p.domainCache.GetDomainByID(domainID) - if err != nil { - return false, err - } - - shouldProcessTask := false -FilterLoop: - for _, targetCluster := range domainEntry.GetReplicationConfig().Clusters 
{ - if p.currentCluster == targetCluster.ClusterName { - shouldProcessTask = true - break FilterLoop - } - } - return shouldProcessTask, nil -} - -//TODO: remove this code after 2DC deprecation -func (p *ReplicationTaskProcessorImpl) convertRetryTaskError( - err error, -) (*shared.RetryTaskError, bool) { - - retError, ok := err.(*shared.RetryTaskError) - return retError, ok -} - -func (p *ReplicationTaskProcessorImpl) convertRetryTaskV2Error( - err error, -) (*shared.RetryTaskV2Error, bool) { - - retError, ok := err.(*shared.RetryTaskV2Error) - return retError, ok -} diff --git a/service/history/replicationTaskProcessor_test.go b/service/history/replicationTaskProcessor_test.go index cf88a26b6f2..73117ec38b7 100644 --- a/service/history/replicationTaskProcessor_test.go +++ b/service/history/replicationTaskProcessor_test.go @@ -21,7 +21,6 @@ package history import ( - "fmt" "testing" "time" @@ -53,18 +52,19 @@ type ( *require.Assertions controller *gomock.Controller - mockResource *resource.Test - mockShard ShardContext - mockEngine *MockEngine - config *Config - historyClient *historyservicetest.MockClient - replicationTaskFetcher *MockReplicationTaskFetcher - mockDomainCache *cache.MockDomainCache - mockClientBean *client.MockBean - adminClient *adminservicetest.MockClient - clusterMetadata *cluster.MockMetadata - executionManager *mocks.ExecutionManager - requestChan chan *request + mockResource *resource.Test + mockShard ShardContext + mockEngine *MockEngine + config *Config + historyClient *historyservicetest.MockClient + replicationTaskFetcher *MockReplicationTaskFetcher + mockDomainCache *cache.MockDomainCache + mockClientBean *client.MockBean + adminClient *adminservicetest.MockClient + clusterMetadata *cluster.MockMetadata + executionManager *mocks.ExecutionManager + requestChan chan *request + replicationTaskExecutor *MockreplicationTaskExecutor replicationTaskProcessor *ReplicationTaskProcessorImpl } @@ -93,6 +93,7 @@ func (s *replicationTaskProcessorSuite) SetupTest() { s.adminClient = s.mockResource.RemoteAdminClient s.clusterMetadata = s.mockResource.ClusterMetadata s.executionManager = s.mockResource.ExecutionMgr + s.replicationTaskExecutor = NewMockreplicationTaskExecutor(s.controller) logger := log.NewNoop() s.mockShard = &shardContextImpl{ shardID: 0, @@ -122,9 +123,9 @@ func (s *replicationTaskProcessorSuite) SetupTest() { s.mockShard, s.mockEngine, s.config, - s.historyClient, metricsClient, s.replicationTaskFetcher, + s.replicationTaskExecutor, ) } @@ -142,61 +143,6 @@ func (s *replicationTaskProcessorSuite) TestSendFetchMessageRequest() { s.Equal(int64(-1), requestMessage.token.GetLastRetrievedMessageId()) } -func (s *replicationTaskProcessorSuite) TestConvertRetryTaskError_OK() { - err := &shared.RetryTaskError{} - _, ok := s.replicationTaskProcessor.convertRetryTaskError(err) - s.True(ok) -} - -func (s *replicationTaskProcessorSuite) TestConvertRetryTaskError_NotOK() { - err := &shared.RetryTaskV2Error{} - _, ok := s.replicationTaskProcessor.convertRetryTaskError(err) - s.False(ok) -} - -func (s *replicationTaskProcessorSuite) TestConvertRetryTaskV2Error_OK() { - err := &shared.RetryTaskV2Error{} - _, ok := s.replicationTaskProcessor.convertRetryTaskV2Error(err) - s.True(ok) -} - -func (s *replicationTaskProcessorSuite) TestConvertRetryTaskV2Error_NotOK() { - err := &shared.RetryTaskError{} - _, ok := s.replicationTaskProcessor.convertRetryTaskV2Error(err) - s.False(ok) -} - -func (s *replicationTaskProcessorSuite) TestFilterTask() { - domainID := uuid.New() - 
s.mockDomainCache.EXPECT(). - GetDomainByID(domainID). - Return(cache.NewGlobalDomainCacheEntryForTest( - nil, - nil, - &persistence.DomainReplicationConfig{ - Clusters: []*persistence.ClusterReplicationConfig{ - { - ClusterName: "active", - }, - }}, - 0, - s.clusterMetadata, - ), nil) - ok, err := s.replicationTaskProcessor.filterTask(domainID) - s.NoError(err) - s.True(ok) -} - -func (s *replicationTaskProcessorSuite) TestFilterTask_Error() { - domainID := uuid.New() - s.mockDomainCache.EXPECT(). - GetDomainByID(domainID). - Return(nil, fmt.Errorf("test")) - ok, err := s.replicationTaskProcessor.filterTask(domainID) - s.Error(err) - s.False(ok) -} - func (s *replicationTaskProcessorSuite) TestHandleSyncShardStatus() { now := time.Now() s.mockEngine.EXPECT().SyncShardStatus(gomock.Any(), &history.SyncShardStatusRequest{ @@ -211,153 +157,6 @@ func (s *replicationTaskProcessorSuite) TestHandleSyncShardStatus() { s.NoError(err) } -func (s *replicationTaskProcessorSuite) TestProcessTaskOnce_DomainReplicationTask() { - defer func() { - if r := recover(); r == nil { - s.Fail("The Domain replication task should panic") - } - }() - - task := &replicator.ReplicationTask{ - TaskType: replicator.ReplicationTaskTypeDomain.Ptr(), - } - err := s.replicationTaskProcessor.processTaskOnce(task) - s.NoError(err) -} - -func (s *replicationTaskProcessorSuite) TestProcessTaskOnce_SyncShardReplicationTask() { - task := &replicator.ReplicationTask{ - TaskType: replicator.ReplicationTaskTypeSyncShardStatus.Ptr(), - } - err := s.replicationTaskProcessor.processTaskOnce(task) - s.NoError(err) -} - -func (s *replicationTaskProcessorSuite) TestProcessTaskOnce_HistoryMetadataReplicationTask() { - task := &replicator.ReplicationTask{ - TaskType: replicator.ReplicationTaskTypeHistoryMetadata.Ptr(), - } - err := s.replicationTaskProcessor.processTaskOnce(task) - s.NoError(err) -} - -func (s *replicationTaskProcessorSuite) TestProcessTaskOnce_SyncActivityReplicationTask() { - domainID := uuid.New() - workflowID := uuid.New() - runID := uuid.New() - task := &replicator.ReplicationTask{ - TaskType: replicator.ReplicationTaskTypeSyncActivity.Ptr(), - SyncActivityTaskAttributes: &replicator.SyncActivityTaskAttributes{ - DomainId: common.StringPtr(domainID), - WorkflowId: common.StringPtr(workflowID), - RunId: common.StringPtr(runID), - }, - } - request := &history.SyncActivityRequest{ - DomainId: common.StringPtr(domainID), - WorkflowId: common.StringPtr(workflowID), - RunId: common.StringPtr(runID), - } - - s.mockDomainCache.EXPECT(). - GetDomainByID(domainID). 
- Return(cache.NewGlobalDomainCacheEntryForTest( - nil, - nil, - &persistence.DomainReplicationConfig{ - Clusters: []*persistence.ClusterReplicationConfig{ - { - ClusterName: "active", - }, - }}, - 0, - s.clusterMetadata, - ), nil).Times(1) - s.mockEngine.EXPECT().SyncActivity(gomock.Any(), request).Return(nil).Times(1) - err := s.replicationTaskProcessor.processTaskOnce(task) - s.NoError(err) -} - -func (s *replicationTaskProcessorSuite) TestProcessTaskOnce_HistoryReplicationTask() { - domainID := uuid.New() - workflowID := uuid.New() - runID := uuid.New() - task := &replicator.ReplicationTask{ - TaskType: replicator.ReplicationTaskTypeHistory.Ptr(), - HistoryTaskAttributes: &replicator.HistoryTaskAttributes{ - DomainId: common.StringPtr(domainID), - WorkflowId: common.StringPtr(workflowID), - RunId: common.StringPtr(runID), - }, - } - request := &history.ReplicateEventsRequest{ - DomainUUID: common.StringPtr(domainID), - WorkflowExecution: &shared.WorkflowExecution{ - WorkflowId: common.StringPtr(workflowID), - RunId: common.StringPtr(runID), - }, - SourceCluster: common.StringPtr("standby"), - ForceBufferEvents: common.BoolPtr(false), - } - - s.mockDomainCache.EXPECT(). - GetDomainByID(domainID). - Return(cache.NewGlobalDomainCacheEntryForTest( - nil, - nil, - &persistence.DomainReplicationConfig{ - Clusters: []*persistence.ClusterReplicationConfig{ - { - ClusterName: "active", - }, - }}, - 0, - s.clusterMetadata, - ), nil).Times(1) - s.mockEngine.EXPECT().ReplicateEvents(gomock.Any(), request).Return(nil).Times(1) - err := s.replicationTaskProcessor.processTaskOnce(task) - s.NoError(err) -} - -func (s *replicationTaskProcessorSuite) TestProcessTaskOnce_HistoryV2ReplicationTask() { - domainID := uuid.New() - workflowID := uuid.New() - runID := uuid.New() - task := &replicator.ReplicationTask{ - TaskType: replicator.ReplicationTaskTypeHistoryV2.Ptr(), - HistoryTaskV2Attributes: &replicator.HistoryTaskV2Attributes{ - DomainId: common.StringPtr(domainID), - WorkflowId: common.StringPtr(workflowID), - RunId: common.StringPtr(runID), - }, - } - request := &history.ReplicateEventsV2Request{ - DomainUUID: common.StringPtr(domainID), - WorkflowExecution: &shared.WorkflowExecution{ - WorkflowId: common.StringPtr(workflowID), - RunId: common.StringPtr(runID), - }, - } - - s.mockDomainCache.EXPECT(). - GetDomainByID(domainID). 
- Return(cache.NewGlobalDomainCacheEntryForTest( - nil, - nil, - &persistence.DomainReplicationConfig{ - Clusters: []*persistence.ClusterReplicationConfig{ - { - ClusterName: "active", - }, - }}, - 0, - s.clusterMetadata, - ), nil).Times(1) - s.mockEngine.EXPECT().ReplicateEventsV2(gomock.Any(), request).Return(nil).Times(1) - err := s.replicationTaskProcessor.processTaskOnce(task) - s.NoError(err) -} - func (s *replicationTaskProcessorSuite) TestPutReplicationTaskToDLQ_SyncActivityReplicationTask() { domainID := uuid.New() workflowID := uuid.New() diff --git a/service/history/shardContext.go b/service/history/shardContext.go index ce947e08a49..4e09e34faed 100644 --- a/service/history/shardContext.go +++ b/service/history/shardContext.go @@ -77,6 +77,8 @@ type ( GetReplicatorAckLevel() int64 UpdateReplicatorAckLevel(ackLevel int64) error + GetReplicatorDLQAckLevel(sourceCluster string) int64 + UpdateReplicatorDLQAckLevel(sourCluster string, ackLevel int64) error GetClusterReplicationLevel(cluster string) int64 UpdateClusterReplicationLevel(cluster string, lastTaskID int64) error @@ -244,6 +246,28 @@ func (s *shardContextImpl) UpdateReplicatorAckLevel(ackLevel int64) error { return s.updateShardInfoLocked() } +func (s *shardContextImpl) GetReplicatorDLQAckLevel(sourceCluster string) int64 { + s.RLock() + defer s.RUnlock() + + if ackLevel, ok := s.shardInfo.ReplicationDLQAckLevel[sourceCluster]; ok { + return ackLevel + } + return -1 +} + +func (s *shardContextImpl) UpdateReplicatorDLQAckLevel( + sourceCluster string, + ackLevel int64, +) error { + + s.Lock() + defer s.Unlock() + s.shardInfo.ReplicationDLQAckLevel[sourceCluster] = ackLevel + s.shardInfo.StolenSinceRenew = 0 + return s.updateShardInfoLocked() +} + func (s *shardContextImpl) GetClusterReplicationLevel(cluster string) int64 { s.RLock() defer s.RUnlock()
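The two ShardContext additions above back the DLQ read, purge, and merge flows: reads start from the per-source-cluster DLQ ack level (which defaults to -1 when the cluster has no entry), and purge/merge delete the range (ackLevel, lastMessageID] before advancing the ack level. A minimal sketch of that sequence, assuming shard, sourceCluster, and lastMessageID are in scope:

    ackLevel := shard.GetReplicatorDLQAckLevel(sourceCluster) // -1 when nothing has been acked yet
    err := shard.GetExecutionManager().RangeDeleteReplicationTaskFromDLQ(
        &persistence.RangeDeleteReplicationTaskFromDLQRequest{
            SourceClusterName:    sourceCluster,
            ExclusiveBeginTaskID: ackLevel,
            InclusiveEndTaskID:   lastMessageID,
        },
    )
    if err == nil {
        // Advancing the ack level is best effort; the handler logs and ignores a failure here.
        _ = shard.UpdateReplicatorDLQAckLevel(sourceCluster, lastMessageID)
    }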