From 7b8017c2ab2311f07fdfca543b0f95560112495d Mon Sep 17 00:00:00 2001 From: Konrad `ktoso` Malawski Date: Thu, 27 Oct 2022 17:19:56 +0900 Subject: [PATCH] !all node renames: node -> endpoint; unique node -> node (#1077) --- .../main.swift | 2 +- .../MultiNode+ClusterSingletonTests.swift | 2 +- .../MultiNode+ReceptionistTests.swift | 4 +- Protos/ActorID.proto | 10 +- Protos/Clocks/VersionVector.proto | 6 +- Protos/Cluster/Cluster.proto | 4 +- Protos/Cluster/ClusterEvents.proto | 4 +- Protos/Cluster/Membership.proto | 20 +- Protos/Cluster/SWIM/SWIM.proto | 2 +- Protos/WireProtocol.proto | 24 +- .../DistributedDiningPhilosophers.swift | 8 +- .../ClusteredActorSystemsXCTestCase.swift | 64 +- Sources/DistributedCluster/ActorID.swift | 225 +---- .../DistributedCluster/ActorRefProvider.swift | 6 +- .../VersionVector+Serialization.swift | 28 +- .../Clocks/Protobuf/VersionVector.pb.swift | 505 +++++++----- .../Clocks/VersionVector.swift | 76 +- .../Cluster/Association.swift | 14 +- .../Cluster/Cluster+Event.swift | 10 +- .../Cluster/Cluster+Member.swift | 28 +- .../Cluster/Cluster+Membership.swift | 116 +-- .../Cluster/ClusterControl.swift | 51 +- .../Cluster/ClusterEventStream.swift | 2 +- .../Cluster/ClusterShell+LeaderActions.swift | 2 +- .../Cluster/ClusterShell+Logging.swift | 8 +- .../Cluster/ClusterShell.swift | 138 ++-- .../Cluster/ClusterShellState.swift | 48 +- .../Cluster/DiscoveryShell.swift | 16 +- .../Cluster/DistributedNodeDeathWatcher.swift | 18 +- .../Cluster/Downing/DowningSettings.swift | 2 +- .../Cluster/Downing/DowningStrategy.swift | 2 +- .../Downing/TimeoutBasedDowningStrategy.swift | 4 +- .../Cluster/HandshakeStateMachine.swift | 42 +- .../Cluster/Leadership.swift | 2 +- ...uster+MembershipGossip+Serialization.swift | 26 +- .../Cluster+MembershipGossip.swift | 42 +- .../Cluster+MembershipGossipLogic.swift | 4 +- .../Cluster/NodeDeathWatcher.swift | 30 +- .../Cluster/Protobuf/Cluster.pb.swift | 302 ++++--- .../Cluster/Protobuf/ClusterEvents.pb.swift | 344 ++++---- .../Protobuf/Membership+Serialization.swift | 10 +- .../Cluster/Protobuf/Membership.pb.swift | 381 ++++++--- .../OperationLogDistributedReceptionist.swift | 14 +- ...rationLogClusterReceptionistBehavior.swift | 12 +- .../SWIM/ClusterMembership+Converters.swift | 22 +- .../SWIM/Protobuf/SWIM+Serialization.swift | 20 +- .../Cluster/SWIM/Protobuf/SWIM.pb.swift | 436 ++++++---- .../Cluster/SWIM/SWIMActor.swift | 26 +- .../Cluster/SystemMessages+Redelivery.swift | 2 +- .../RemoteClusterActorPersonality.swift | 4 +- .../Transport/TransportPipelines.swift | 16 +- .../Cluster/Transport/WireMessages.swift | 16 +- .../DistributedCluster/ClusterEndpoint.swift | 88 ++ Sources/DistributedCluster/ClusterNode.swift | 136 +++ .../DistributedCluster/ClusterSystem.swift | 42 +- .../ClusterSystemSettings.swift | 62 +- Sources/DistributedCluster/DeadLetters.swift | 8 +- .../Docs.docc/ClusterSingleton.md | 4 +- .../Docs.docc/Clustering.md | 26 +- .../DistributedCluster/Docs.docc/Security.md | 2 +- .../Gossip/Gossiper+Shell.swift | 2 +- .../LifecycleWatchContainer.swift | 14 +- .../_BehaviorDeathWatch.swift | 8 +- .../ClusterSingletonAllocationStrategy.swift | 16 +- .../ClusterSingletonBoss.swift | 28 +- .../Protobuf/ActorID+Serialization.swift | 82 +- .../Protobuf/ActorID.pb.swift | 237 ++++-- .../Protobuf/SystemMessages.pb.swift | 439 ++++++---- .../Protobuf/WireProtocol+Serialization.swift | 16 +- .../Protobuf/WireProtocol.pb.swift | 773 +++++++++++------- .../DistributedReceptionist.swift | 18 +- .../Receptionist/Receptionist.swift 
| 18 +- Sources/DistributedCluster/Refs+any.swift | 2 +- Sources/DistributedCluster/Refs.swift | 4 +- .../ActorRef+Serialization.swift | 16 +- .../Protobuf/Serialization.pb.swift | 10 +- .../Serialization/Serialization+Context.swift | 4 +- .../Serialization+Settings.swift | 6 +- .../Serialization/Serialization.swift | 2 +- .../DistributedCluster/SystemMessages.swift | 2 +- Sources/DistributedCluster/_ActorShell.swift | 2 +- Sources/DistributedCluster/utils.swift | 2 +- .../MultiNodeTestConductor.swift | 29 +- .../MultiNodeTestKit+Control.swift | 14 +- .../MultiNodeTestKit/MultiNodeTestKit.swift | 9 +- .../boot+MultiNodeTestKitRunner+Exec.swift | 24 +- .../ClusterDocExamples.swift | 10 +- .../SerializationDocExamples.pb.swift | 8 +- .../ActorIDMetadataTests.swift | 2 +- .../ActorIDTests.swift | 34 +- .../ActorRefAdapterTests.swift | 10 +- .../VersionVector+SerializationTests.swift | 4 +- .../Clocks/VersionVectorTests.swift | 2 +- .../Cluster/AssociationClusteredTests.swift | 168 ++-- .../Cluster/ClusterDiscoveryTests.swift | 42 +- .../Cluster/ClusterEventStreamTests.swift | 12 +- .../ClusterLeaderActionsClusteredTests.swift | 80 +- .../Cluster/ClusterLeaderActionsTests.swift | 24 +- .../ClusterMembershipSnapshotTests.swift | 12 +- .../Cluster/ClusterOnDownActionTests.swift | 4 +- .../DowningClusteredTests.swift | 42 +- .../TimeoutBasedDowningInstanceTests.swift | 8 +- .../Cluster/GossipSeenTableTests.swift | 12 +- .../Cluster/LeadershipTests.swift | 40 +- .../MembershipGossipClusteredTests.swift | 46 +- ...MembershipGossipLogicSimulationTests.swift | 72 +- .../Cluster/MembershipGossipTests.swift | 24 +- .../ClusterEvents+SerializationTests.swift | 6 +- .../Membership+SerializationTests.swift | 10 +- .../Cluster/ProtobufRoundTripTests.swift | 8 +- ...istributedReceptionistClusteredTests.swift | 16 +- ...LogClusterReceptionistClusteredTests.swift | 28 +- .../Cluster/RemoteActorRefProviderTests.swift | 22 +- .../RemoteMessagingClusteredTests.swift | 34 +- .../RemotingHandshakeStateMachineTests.swift | 16 +- .../Cluster/RemotingTLSClusteredTests.swift | 30 +- .../SWIM/SWIMActorClusteredTests.swift | 38 +- .../ShootTheOtherNodeClusteredTests.swift | 4 +- .../SystemMessagesRedeliveryTests.swift | 72 +- .../TestExtensions+MembershipDSL.swift | 24 +- .../Cluster/TestExtensions.swift | 4 +- .../ClusterSystem+Testing.swift | 6 +- .../ClusterSystemTests.swift | 10 +- .../DeadLetterTests.swift | 4 +- .../DistributedReceptionistTests.swift | 8 +- .../EndpointTests.swift | 57 ++ .../InterceptorTests.swift | 4 +- .../LifecycleWatchTests.swift | 2 +- .../MembershipTests.swift | 140 ++-- .../Metrics/SWIMActorPeerMetricsTests.swift | 10 +- .../NodeDeathWatcherTests.swift | 6 +- Tests/DistributedClusterTests/NodeTests.swift | 57 -- ...ClusterSingletonPluginClusteredTests.swift | 66 +- .../RemoteCallTests.swift | 22 +- .../SerializationTests.swift | 4 +- scripts/generate_protos.sh | 2 +- 136 files changed, 3823 insertions(+), 2888 deletions(-) create mode 100644 Sources/DistributedCluster/ClusterEndpoint.swift create mode 100644 Sources/DistributedCluster/ClusterNode.swift create mode 100644 Tests/DistributedClusterTests/EndpointTests.swift delete mode 100644 Tests/DistributedClusterTests/NodeTests.swift diff --git a/IntegrationTests/tests_01_cluster/it_Clustered_swim_suspension_reachability/main.swift b/IntegrationTests/tests_01_cluster/it_Clustered_swim_suspension_reachability/main.swift index 68c37be82..50eee88c0 100644 --- 
a/IntegrationTests/tests_01_cluster/it_Clustered_swim_suspension_reachability/main.swift +++ b/IntegrationTests/tests_01_cluster/it_Clustered_swim_suspension_reachability/main.swift @@ -53,7 +53,7 @@ if args.count >= 3 { print("parsing port") let port = Int(args[2])! print("Joining") - system.cluster.join(node: Node(systemName: "System", host: host, port: port)) + system.cluster.join(endpoint: Cluster.Endpoint(systemName: "System", host: host, port: port)) } _Thread.sleep(.seconds(120)) diff --git a/MultiNodeTests/DistributedActorsMultiNodeTests/MultiNode+ClusterSingletonTests.swift b/MultiNodeTests/DistributedActorsMultiNodeTests/MultiNode+ClusterSingletonTests.swift index 8ecb65500..b1803f410 100644 --- a/MultiNodeTests/DistributedActorsMultiNodeTests/MultiNode+ClusterSingletonTests.swift +++ b/MultiNodeTests/DistributedActorsMultiNodeTests/MultiNode+ClusterSingletonTests.swift @@ -79,7 +79,7 @@ public final class MultiNodeClusterSingletonTests: MultiNodeTestSuite { } distributed func greet(name: String) -> String { - "\(self.greeting) \(name)! (from node: \(self.id.uniqueNode), id: \(self.id.detailedDescription))" + "\(self.greeting) \(name)! (from node: \(self.id.node), id: \(self.id.detailedDescription))" } } } diff --git a/MultiNodeTests/DistributedActorsMultiNodeTests/MultiNode+ReceptionistTests.swift b/MultiNodeTests/DistributedActorsMultiNodeTests/MultiNode+ReceptionistTests.swift index a7ed4bad3..ef479b630 100644 --- a/MultiNodeTests/DistributedActorsMultiNodeTests/MultiNode+ReceptionistTests.swift +++ b/MultiNodeTests/DistributedActorsMultiNodeTests/MultiNode+ReceptionistTests.swift @@ -51,7 +51,7 @@ public final class MultiNodeReceptionistTests: MultiNodeTestSuite { let expectedCount = Nodes.allCases.count var discovered: Set = [] for try await actor in await multiNode.system.receptionist.listing(of: .init(DistributedEcho.self)) { - multiNode.log.notice("Discovered \(actor.id) from \(actor.id.uniqueNode)") + multiNode.log.notice("Discovered \(actor.id) from \(actor.id.node)") discovered.insert(actor) if discovered.count == expectedCount { @@ -79,7 +79,7 @@ public final class MultiNodeReceptionistTests: MultiNodeTestSuite { } distributed func echo(name: String) -> String { - "echo: \(self.greeting)\(name)! (from node: \(self.id.uniqueNode), id: \(self.id.detailedDescription))" + "echo: \(self.greeting)\(name)! 
(from node: \(self.id.node), id: \(self.id.detailedDescription))" } } } diff --git a/Protos/ActorID.proto b/Protos/ActorID.proto index 4b554a9e1..96ca37890 100644 --- a/Protos/ActorID.proto +++ b/Protos/ActorID.proto @@ -18,7 +18,7 @@ option optimize_for = SPEED; option swift_prefix = "_Proto"; message ActorID { - UniqueNode node = 1; + ClusterNode node = 1; ActorPath path = 2; uint32 incarnation = 3; map metadata = 4; @@ -28,12 +28,12 @@ message ActorPath { repeated string segments = 1; } -message UniqueNode { - Node node = 1; - uint64 nid = 2; +message ClusterNode { + ClusterEndpoint endpoint = 1; + uint64 nid = 2; } -message Node { +message ClusterEndpoint { string protocol = 1; string system = 2; string hostname = 3; diff --git a/Protos/Clocks/VersionVector.proto b/Protos/Clocks/VersionVector.proto index 4c67ad5af..52413c688 100644 --- a/Protos/Clocks/VersionVector.proto +++ b/Protos/Clocks/VersionVector.proto @@ -30,9 +30,9 @@ message ActorIdentity { message VersionReplicaID { oneof value { - ActorID actorID = 1; - UniqueNode uniqueNode = 2; - uint64 uniqueNodeID = 3; + ActorID actorID = 1; + ClusterNode node = 2; + uint64 nodeID = 3; } } diff --git a/Protos/Cluster/Cluster.proto b/Protos/Cluster/Cluster.proto index fdff557ca..410510b51 100644 --- a/Protos/Cluster/Cluster.proto +++ b/Protos/Cluster/Cluster.proto @@ -37,6 +37,6 @@ message ClusterInbound { } message ClusterRestInPeace { - UniqueNode targetNode = 1; - UniqueNode fromNode = 2; + ClusterNode targetNode = 1; + ClusterNode fromNode = 2; } diff --git a/Protos/Cluster/ClusterEvents.proto b/Protos/Cluster/ClusterEvents.proto index 3bd71baa1..3e07c8063 100644 --- a/Protos/Cluster/ClusterEvents.proto +++ b/Protos/Cluster/ClusterEvents.proto @@ -32,10 +32,10 @@ message ClusterEvent { } message ClusterMembershipChange { - UniqueNode node = 1; + ClusterNode node = 1; ClusterMemberStatus fromStatus = 2; - ClusterMemberStatus toStatus = 3; + ClusterMemberStatus toStatus = 3; } message ClusterLeadershipChange { diff --git a/Protos/Cluster/Membership.proto b/Protos/Cluster/Membership.proto index 007af26e8..f42cff43d 100644 --- a/Protos/Cluster/Membership.proto +++ b/Protos/Cluster/Membership.proto @@ -23,19 +23,19 @@ import "Clocks/VersionVector.proto"; message ClusterMembership { repeated ClusterMember members = 1; - UniqueNode leaderNode = 2; + ClusterNode leaderNode = 2; } message ClusterMember { - UniqueNode node = 1; - ClusterMemberStatus status = 2; + ClusterNode node = 1; + ClusterMemberStatus status = 2; ClusterMemberReachability reachability = 3; - uint32 upNumber = 4; + uint32 upNumber = 4; } enum ClusterMemberReachability { CLUSTER_MEMBER_REACHABILITY_UNSPECIFIED = 0; - CLUSTER_MEMBER_REACHABILITY_REACHABLE = 1; + CLUSTER_MEMBER_REACHABILITY_REACHABLE = 1; CLUSTER_MEMBER_REACHABILITY_UNREACHABLE = 2; } @@ -51,13 +51,13 @@ enum ClusterMemberStatus { // ==== Membership Gossip ---------------------------------------------------------------------------------------------- message ClusterMembershipGossip { - // Membership contains full UniqueNode renderings, and the owner and seen table refer to them by UniqueNode.ID + // Membership contains full ClusterNode renderings, and the owner and seen table refer to them by ClusterNode.ID // this saves us space (by avoiding to render the unique node explicitly many times for each member/seen-entry). ClusterMembership membership = 1; - // The following fields will use compressed UniqueNode encoding and ONLY serialize them as their uniqueNodeID. 
- // During deserialization the fields can be resolved against the membership to obtain full UniqueNode values if necessary. - uint64 ownerUniqueNodeID = 2; + // The following fields will use compressed ClusterNode encoding and ONLY serialize them as their nodeID. + // During deserialization the fields can be resolved against the membership to obtain full ClusterNode values if necessary. + uint64 ownerClusterNodeID = 2; ClusterMembershipSeenTable seenTable = 3; } @@ -66,6 +66,6 @@ message ClusterMembershipSeenTable { } message ClusterMembershipSeenTableRow { - uint64 uniqueNodeID = 1; + uint64 nodeID = 1; VersionVector version = 2; } diff --git a/Protos/Cluster/SWIM/SWIM.proto b/Protos/Cluster/SWIM/SWIM.proto index 1ce950ddc..903dbb746 100644 --- a/Protos/Cluster/SWIM/SWIM.proto +++ b/Protos/Cluster/SWIM/SWIM.proto @@ -48,7 +48,7 @@ message SWIMStatus { Type type = 1; uint64 incarnation = 2; - repeated UniqueNode suspectedBy = 3; + repeated ClusterNode suspectedBy = 3; } message SWIMMember { diff --git a/Protos/WireProtocol.proto b/Protos/WireProtocol.proto index 257f283dd..f7bb395d7 100644 --- a/Protos/WireProtocol.proto +++ b/Protos/WireProtocol.proto @@ -24,9 +24,9 @@ import "Serialization/Serialization.proto"; // ==== Handshake ------------------------------------------------------------------------------------------------------ message HandshakeOffer { - ProtocolVersion version = 1; - UniqueNode originNode = 2; - Node targetNode = 3; + ProtocolVersion version = 1; + ClusterNode originNode = 2; + ClusterEndpoint targetEndpoint = 3; // In the future we may want to add additional information // about certain capabilities here. E.g. when a node supports // faster transport like InfiniBand and the likes, so we can @@ -43,14 +43,14 @@ message HandshakeResponse { message HandshakeAccept { ProtocolVersion version = 1; - UniqueNode originNode = 2; - UniqueNode targetNode = 3; + ClusterNode originNode = 2; + ClusterNode targetNode = 3; } message HandshakeReject { ProtocolVersion version = 1; - UniqueNode originNode = 2; - UniqueNode targetNode = 3; + ClusterNode originNode = 2; + ClusterNode targetNode = 3; string reason = 4; } @@ -66,16 +66,16 @@ message Envelope { // System messages have to be reliable, therefore they need to be acknowledged // by the receiving node. 
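The handshake messages above distinguish a full `ClusterNode` (an endpoint plus the unique ID of one concrete incarnation) from a bare `ClusterEndpoint`: an offer can only name its target by endpoint, because the target's unique ID is not learned until it answers. A minimal Swift sketch of that shape, assuming the post-rename `Cluster.Node`/`Cluster.Endpoint` types added by this patch; these structs are illustrative stand-ins, not the library's actual wire-protocol types, and the protocol-version field is omitted.

```swift
import DistributedCluster

// Illustrative stand-ins mirroring the handshake proto messages above;
// NOT the library's actual wire message types.
struct HandshakeOfferSketch {
    var originNode: Cluster.Node          // we always know our own unique node
    var targetEndpoint: Cluster.Endpoint  // the target's unique ID is unknown until it replies
}

struct HandshakeAcceptSketch {
    var originNode: Cluster.Node
    var targetNode: Cluster.Node          // after acceptance, both sides are known as unique Cluster.Nodes
}

struct HandshakeRejectSketch {
    var originNode: Cluster.Node
    var targetNode: Cluster.Node
    var reason: String
}
```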
message SystemEnvelope { - uint64 sequenceNr = 1; - UniqueNode from = 2; + uint64 sequenceNr = 1; + ClusterNode from = 2; - Manifest manifest = 3; - bytes payload = 4; + Manifest manifest = 3; + bytes payload = 4; } message SystemAck { uint64 sequenceNr = 1; - UniqueNode from = 2; + ClusterNode from = 2; } // The version is represented as 4 bytes: diff --git a/Samples/Sources/SampleDiningPhilosophers/DistributedDiningPhilosophers.swift b/Samples/Sources/SampleDiningPhilosophers/DistributedDiningPhilosophers.swift index aab320871..13b62e719 100644 --- a/Samples/Sources/SampleDiningPhilosophers/DistributedDiningPhilosophers.swift +++ b/Samples/Sources/SampleDiningPhilosophers/DistributedDiningPhilosophers.swift @@ -35,9 +35,9 @@ final class DistributedDiningPhilosophers { print("~~~~~~~ started \(systems.count) actor systems ~~~~~~~") // TODO: Joining to be simplified by having "seed nodes" (that a node should join) - systemA.cluster.join(node: systemB.settings.node) - systemA.cluster.join(node: systemC.settings.node) - systemC.cluster.join(node: systemB.settings.node) + systemA.cluster.join(endpoint: systemB.settings.endpoint) + systemA.cluster.join(endpoint: systemC.settings.endpoint) + systemC.cluster.join(endpoint: systemB.settings.endpoint) print("waiting for cluster to form...") try await self.ensureCluster(systems, within: .seconds(10)) @@ -71,7 +71,7 @@ final class DistributedDiningPhilosophers { } private func ensureCluster(_ systems: [ClusterSystem], within: Duration) async throws { - let nodes = Set(systems.map(\.settings.uniqueBindNode)) + let nodes = Set(systems.map(\.settings.bindNode)) try await withThrowingTaskGroup(of: Void.self) { group in for system in systems { diff --git a/Sources/DistributedActorsTestKit/Cluster/ClusteredActorSystemsXCTestCase.swift b/Sources/DistributedActorsTestKit/Cluster/ClusteredActorSystemsXCTestCase.swift index 7c409528d..c3abba3ba 100644 --- a/Sources/DistributedActorsTestKit/Cluster/ClusteredActorSystemsXCTestCase.swift +++ b/Sources/DistributedActorsTestKit/Cluster/ClusteredActorSystemsXCTestCase.swift @@ -115,7 +115,7 @@ open class ClusteredActorSystemsXCTestCase: XCTestCase { open func setUpNode(_ name: String, _ modifySettings: ((inout ClusterSystemSettings) -> Void)? = nil) async -> ClusterSystem { let node = await ClusterSystem(name) { settings in settings.enabled = true - settings.node.port = self.nextPort() + settings.endpoint.port = self.nextPort() if self.captureLogs { var captureSettings = LogCapture.Settings() @@ -187,7 +187,7 @@ open class ClusteredActorSystemsXCTestCase: XCTestCase { } public func testKit(_ system: ClusterSystem) -> ActorTestKit { - guard let idx = self._nodes.firstIndex(where: { s in s.cluster.uniqueNode == system.cluster.uniqueNode }) else { + guard let idx = self._nodes.firstIndex(where: { s in s.cluster.node == system.cluster.node }) else { fatalError("Must only call with system that was spawned using `setUpNode()`, was: \(system)") } @@ -201,36 +201,36 @@ open class ClusteredActorSystemsXCTestCase: XCTestCase { ensureWithin: Duration? = nil, ensureMembers maybeExpectedStatus: Cluster.MemberStatus? 
= nil, file: StaticString = #filePath, line: UInt = #line ) async throws { - node.cluster.join(node: other.cluster.uniqueNode.node) + node.cluster.join(endpoint: other.cluster.node.endpoint) - try assertAssociated(node, withAtLeast: other.settings.uniqueBindNode) - try assertAssociated(other, withAtLeast: node.settings.uniqueBindNode) + try assertAssociated(node, withAtLeast: other.settings.bindNode) + try assertAssociated(other, withAtLeast: node.settings.bindNode) if let expectedStatus = maybeExpectedStatus { if let specificTimeout = ensureWithin { - try await self.ensureNodes(expectedStatus, on: node, within: specificTimeout, nodes: other.cluster.uniqueNode, file: file, line: line) + try await self.ensureNodes(expectedStatus, on: node, within: specificTimeout, nodes: other.cluster.node, file: file, line: line) } else { - try await self.ensureNodes(expectedStatus, on: node, nodes: other.cluster.uniqueNode, file: file, line: line) + try await self.ensureNodes(expectedStatus, on: node, nodes: other.cluster.node, file: file, line: line) } } } public func ensureNodes( - _ status: Cluster.MemberStatus, on system: ClusterSystem? = nil, within: Duration = .seconds(20), nodes: UniqueNode..., + _ status: Cluster.MemberStatus, on system: ClusterSystem? = nil, within: Duration = .seconds(20), nodes: Cluster.Node..., file: StaticString = #filePath, line: UInt = #line ) async throws { try await self.ensureNodes(status, on: system, within: within, nodes: nodes, file: file, line: line) } public func ensureNodes( - atLeast status: Cluster.MemberStatus, on system: ClusterSystem? = nil, within: Duration = .seconds(20), nodes: UniqueNode..., + atLeast status: Cluster.MemberStatus, on system: ClusterSystem? = nil, within: Duration = .seconds(20), nodes: Cluster.Node..., file: StaticString = #filePath, line: UInt = #line ) async throws { try await self.ensureNodes(atLeast: status, on: system, within: within, nodes: nodes, file: file, line: line) } public func ensureNodes( - _ status: Cluster.MemberStatus, on system: ClusterSystem? = nil, within: Duration = .seconds(20), nodes: [UniqueNode], + _ status: Cluster.MemberStatus, on system: ClusterSystem? = nil, within: Duration = .seconds(20), nodes: [Cluster.Node], file: StaticString = #filePath, line: UInt = #line ) async throws { guard let onSystem = system ?? self._nodes.first(where: { !$0.isShuttingDown }) else { @@ -246,7 +246,7 @@ open class ClusteredActorSystemsXCTestCase: XCTestCase { } public func ensureNodes( - atLeast status: Cluster.MemberStatus, on system: ClusterSystem? = nil, within: Duration = .seconds(20), nodes: [UniqueNode], + atLeast status: Cluster.MemberStatus, on system: ClusterSystem? = nil, within: Duration = .seconds(20), nodes: [Cluster.Node], file: StaticString = #filePath, line: UInt = #line ) async throws { guard let onSystem = system ?? self._nodes.first(where: { !$0.isShuttingDown }) else { @@ -273,7 +273,7 @@ extension ClusteredActorSystemsXCTestCase { system.cluster.ref.tell(.query(.currentMembership(p.ref))) let membership = try! p.expectMessage() - let info = "Membership on [\(reflecting: system.cluster.uniqueNode)]: \(membership.prettyDescription)" + let info = "Membership on [\(reflecting: system.cluster.node)]: \(membership.prettyDescription)" p.stop() @@ -322,7 +322,7 @@ extension ClusteredActorSystemsXCTestCase { extension ClusteredActorSystemsXCTestCase { public func assertAssociated( - _ system: ClusterSystem, withAtLeast node: UniqueNode, + _ system: ClusterSystem, withAtLeast node: Cluster.Node, timeout: Duration? 
= nil, interval: Duration? = nil, verbose: Bool = false, file: StaticString = #filePath, line: UInt = #line, column: UInt = #column ) throws { @@ -333,7 +333,7 @@ extension ClusteredActorSystemsXCTestCase { } public func assertAssociated( - _ system: ClusterSystem, withExactly node: UniqueNode, + _ system: ClusterSystem, withExactly node: Cluster.Node, timeout: Duration? = nil, interval: Duration? = nil, verbose: Bool = false, file: StaticString = #filePath, line: UInt = #line, column: UInt = #column ) throws { @@ -350,8 +350,8 @@ extension ClusteredActorSystemsXCTestCase { /// - withAtLeast: sub-set of nodes that must be associated public func assertAssociated( _ system: ClusterSystem, - withExactly exactlyNodes: [UniqueNode] = [], - withAtLeast atLeastNodes: [UniqueNode] = [], + withExactly exactlyNodes: [Cluster.Node] = [], + withAtLeast atLeastNodes: [Cluster.Node] = [], timeout: Duration? = nil, interval: Duration? = nil, verbose: Bool = false, file: StaticString = #filePath, line: UInt = #line, column: UInt = #column ) throws { @@ -360,7 +360,7 @@ extension ClusteredActorSystemsXCTestCase { let testKit = self.testKit(system) - let probe = testKit.makeTestProbe(.prefixed(with: "probe-assertAssociated"), expecting: Set.self, file: file, line: line) + let probe = testKit.makeTestProbe(.prefixed(with: "probe-assertAssociated"), expecting: Set.self, file: file, line: line) defer { probe.stop() } try testKit.eventually(within: timeout ?? .seconds(8), file: file, line: line, column: column) { @@ -368,7 +368,7 @@ extension ClusteredActorSystemsXCTestCase { let associatedNodes = try probe.expectMessage(file: file, line: line) if verbose { - pprint(" Self: \(String(reflecting: system.settings.uniqueBindNode))") + pprint(" Self: \(String(reflecting: system.settings.bindNode))") pprint(" Associated nodes: \(associatedNodes.map { String(reflecting: $0) })") pprint(" Expected exact nodes: \(String(reflecting: exactlyNodes))") pprint("Expected at least nodes: \(String(reflecting: atLeastNodes))") @@ -400,19 +400,19 @@ extension ClusteredActorSystemsXCTestCase { } public func assertNotAssociated( - system: ClusterSystem, node: UniqueNode, + system: ClusterSystem, node: Cluster.Node, timeout: Duration? = nil, interval: Duration? = nil, verbose: Bool = false ) throws { let testKit: ActorTestKit = self.testKit(system) - let probe = testKit.makeTestProbe(.prefixed(with: "assertNotAssociated-probe"), expecting: Set.self) + let probe = testKit.makeTestProbe(.prefixed(with: "assertNotAssociated-probe"), expecting: Set.self) defer { probe.stop() } try testKit.assertHolds(for: timeout ?? .seconds(1)) { system.cluster.ref.tell(.query(.associatedNodes(probe.ref))) let associatedNodes = try probe.expectMessage() // TODO: use interval here if verbose { - pprint(" Self: \(String(reflecting: system.settings.uniqueBindNode))") + pprint(" Self: \(String(reflecting: system.settings.bindNode))") pprint(" Associated nodes: \(associatedNodes.map { String(reflecting: $0) })") pprint(" Not expected node: \(String(reflecting: node))") } @@ -425,7 +425,7 @@ extension ClusteredActorSystemsXCTestCase { /// Asserts the given member node has the expected `status` within the duration. 
public func assertMemberStatus( - on system: ClusterSystem, node: UniqueNode, + on system: ClusterSystem, node: Cluster.Node, is expectedStatus: Cluster.MemberStatus, within: Duration, file: StaticString = #filePath, line: UInt = #line @@ -437,11 +437,11 @@ extension ClusteredActorSystemsXCTestCase { } catch let error as Cluster.MembershipError { switch error.underlying.error { case .notFound: - throw testKit.error("Expected [\(system.cluster.uniqueNode)] to know about [\(node)] member", file: file, line: line) + throw testKit.error("Expected [\(system.cluster.node)] to know about [\(node)] member", file: file, line: line) case .statusRequirementNotMet(_, let foundMember): throw testKit.error( """ - Expected \(reflecting: foundMember.uniqueNode) on \(reflecting: system.cluster.uniqueNode) \ + Expected \(reflecting: foundMember.node) on \(reflecting: system.cluster.node) \ to be seen as: [\(expectedStatus)], but was [\(foundMember.status)] """, file: file, @@ -454,7 +454,7 @@ extension ClusteredActorSystemsXCTestCase { } public func assertMemberStatus( - on system: ClusterSystem, node: UniqueNode, + on system: ClusterSystem, node: Cluster.Node, atLeast expectedAtLeastStatus: Cluster.MemberStatus, within: Duration, file: StaticString = #filePath, line: UInt = #line @@ -466,11 +466,11 @@ extension ClusteredActorSystemsXCTestCase { } catch let error as Cluster.MembershipError { switch error.underlying.error { case .notFound: - throw testKit.error("Expected [\(system.cluster.uniqueNode)] to know about [\(node)] member", file: file, line: line) + throw testKit.error("Expected [\(system.cluster.node)] to know about [\(node)] member", file: file, line: line) case .atLeastStatusRequirementNotMet(_, let foundMember): throw testKit.error( """ - Expected \(reflecting: foundMember.uniqueNode) on \(reflecting: system.cluster.uniqueNode) \ + Expected \(reflecting: foundMember.node) on \(reflecting: system.cluster.node) \ to be seen as at-least: [\(expectedAtLeastStatus)], but was [\(foundMember.status)] """, file: file, @@ -483,7 +483,7 @@ extension ClusteredActorSystemsXCTestCase { } /// Assert based on the event stream of ``Cluster/Event`` that the given `node` was downed or removed. - public func assertMemberDown(_ eventStreamProbe: ActorTestProbe, node: UniqueNode) throws { + public func assertMemberDown(_ eventStreamProbe: ActorTestProbe, node: Cluster.Node) throws { let events = try eventStreamProbe.fishFor(Cluster.Event.self, within: .seconds(5)) { switch $0 { case .membershipChange(let change) @@ -503,7 +503,7 @@ extension ClusteredActorSystemsXCTestCase { /// /// An error is thrown but NOT failing the test; use in pair with `testKit.eventually` to achieve the expected behavior. 
public func assertLeaderNode( - on system: ClusterSystem, is expectedNode: UniqueNode?, + on system: ClusterSystem, is expectedNode: Cluster.Node?, file: StaticString = #filePath, line: UInt = #line ) throws { let testKit = self.testKit(system) @@ -514,9 +514,9 @@ extension ClusteredActorSystemsXCTestCase { system.cluster.ref.tell(.query(.currentMembership(p.ref))) let membership = try p.expectMessage() - let leaderNode = membership.leader?.uniqueNode + let leaderNode = membership.leader?.node if leaderNode != expectedNode { - throw testKit.error("Expected \(reflecting: expectedNode) to be leader node on \(reflecting: system.cluster.uniqueNode) but was [\(reflecting: leaderNode)]") + throw testKit.error("Expected \(reflecting: expectedNode) to be leader node on \(reflecting: system.cluster.node) but was [\(reflecting: leaderNode)]") } } } @@ -528,7 +528,7 @@ extension ClusteredActorSystemsXCTestCase { public func resolveRef(_ system: ClusterSystem, type: M.Type, id: ActorID, on targetSystem: ClusterSystem) -> _ActorRef { // DO NOT TRY THIS AT HOME; we do this since we have no receptionist which could offer us references // first we manually construct the "right remote path", DO NOT ABUSE THIS IN REAL CODE (please) :-) - let remoteNode = targetSystem.settings.uniqueBindNode + let remoteNode = targetSystem.settings.bindNode let uniqueRemoteNode = ActorID(remote: remoteNode, path: id.path, incarnation: id.incarnation) let resolveContext = _ResolveContext(id: uniqueRemoteNode, system: system) diff --git a/Sources/DistributedCluster/ActorID.swift b/Sources/DistributedCluster/ActorID.swift index ed0b8d268..b12d37e78 100644 --- a/Sources/DistributedCluster/ActorID.swift +++ b/Sources/DistributedCluster/ActorID.swift @@ -135,7 +135,7 @@ extension ClusterSystem { internal var _location: ActorLocation /// The unique node on which the actor identified by this identity is located. 
- public var uniqueNode: UniqueNode { + public var node: Cluster.Node { switch self._location { case .local(let node): return node case .remote(let node): return node @@ -182,7 +182,7 @@ extension ClusterSystem { public let incarnation: ActorIncarnation // TODO(distributed): remove this initializer, as it is only for Behavior actors - init(local node: UniqueNode, path: ActorPath?, incarnation: ActorIncarnation) { + init(local node: Cluster.Node, path: ActorPath?, incarnation: ActorIncarnation) { self.context = .init(lifecycle: nil, remoteCallInterceptor: nil) self._location = .local(node) self.incarnation = incarnation @@ -193,7 +193,7 @@ extension ClusterSystem { } // TODO(distributed): remove this initializer, as it is only for Behavior actors - init(remote node: UniqueNode, path: ActorPath?, incarnation: ActorIncarnation) { + init(remote node: Cluster.Node, path: ActorPath?, incarnation: ActorIncarnation) { self.context = .init(lifecycle: nil, remoteCallInterceptor: nil) self._location = .remote(node) self.incarnation = incarnation @@ -203,7 +203,7 @@ extension ClusterSystem { traceLog_DeathWatch("Made ID: \(self)") } - public init(remote node: UniqueNode, type: Act.Type, incarnation: ActorIncarnation) + public init(remote node: Cluster.Node, type: Act.Type, incarnation: ActorIncarnation) where Act: DistributedActor, Act.ActorSystem == ClusterSystem { self.context = .init(lifecycle: nil, remoteCallInterceptor: nil) @@ -215,7 +215,7 @@ extension ClusterSystem { traceLog_DeathWatch("Made ID: \(self)") } - init(local node: UniqueNode, type: Act.Type, incarnation: ActorIncarnation, + init(local node: Cluster.Node, type: Act.Type, incarnation: ActorIncarnation, context: DistributedActorContext) where Act: DistributedActor, Act.ActorSystem == ClusterSystem { @@ -228,7 +228,7 @@ extension ClusterSystem { traceLog_DeathWatch("Made ID: \(self)") } - init(remote node: UniqueNode, type: Act.Type, incarnation: ActorIncarnation, + init(remote node: Cluster.Node, type: Act.Type, incarnation: ActorIncarnation, context: DistributedActorContext) where Act: DistributedActor, Act.ActorSystem == ClusterSystem { @@ -288,7 +288,7 @@ extension ActorID: Hashable { // If we're comparing "well known" actors, we ignore the concrete incarnation, // and compare the well known name instead. This works for example for "$receptionist" // and other well known names, that can be resolved using them, without an incarnation number. 
- if lhsWellKnownName == rhsWellKnownName, lhs.uniqueNode == rhs.uniqueNode { + if lhsWellKnownName == rhsWellKnownName, lhs.node == rhs.node { return true } } else { @@ -304,7 +304,7 @@ extension ActorID: Hashable { // if they happen to be equal, we don't know yet for sure if it's the same actor or not, // as incarnation is just a random ID thus we need to compare the node and path as well return lhs.incarnation == rhs.incarnation && - lhs.uniqueNode == rhs.uniqueNode && + lhs.node == rhs.node && lhs.path == rhs.path } @@ -314,7 +314,7 @@ extension ActorID: Hashable { } else { hasher.combine(self.incarnation) } - hasher.combine(self.uniqueNode) + hasher.combine(self.node) hasher.combine(self.path) } } @@ -323,7 +323,7 @@ extension ActorID: CustomStringConvertible { public var description: String { var res = "" if self._isRemote { - res += "\(self.uniqueNode)" + res += "\(self.node)" } if let wellKnown = self.metadata.wellKnown { @@ -352,7 +352,7 @@ extension ActorID: CustomStringConvertible { public var detailedDescription: String { var res = "" if self._isRemote { - res += "\(reflecting: self.uniqueNode)" + res += "\(reflecting: self.node)" } res += "\(self.path)" @@ -370,7 +370,7 @@ extension ActorID: CustomStringConvertible { /// Prints all information contained in the ID, including `incarnation` and all `metadata`. public var fullDescription: String { var res = "" - res += "\(reflecting: self.uniqueNode)" + res += "\(reflecting: self.node)" res += "\(self.path)" res += "#\(self.incarnation.value)" @@ -389,12 +389,12 @@ extension ActorID: CustomStringConvertible { extension ActorID { /// Local root (also known as: "/") actor address. /// Only to be used by the "/" root "actor" - static func _localRoot(on node: UniqueNode) -> ActorID { + static func _localRoot(on node: Cluster.Node) -> ActorID { ActorPath._root.makeLocalID(on: node, incarnation: .wellKnown) } /// Local dead letters address. - static func _deadLetters(on node: UniqueNode) -> ActorID { + static func _deadLetters(on node: Cluster.Node) -> ActorID { ActorPath._deadLetters.makeLocalID(on: node, incarnation: .wellKnown) } } @@ -413,13 +413,13 @@ extension ActorID { public var _asRemote: Self { var remote = self - remote._location = .remote(remote.uniqueNode) + remote._location = .remote(remote.node) return remote } public var _asLocal: Self { var local = self - local._location = .local(self.uniqueNode) + local._location = .local(self.node) return local } } @@ -452,14 +452,14 @@ extension ActorID: _PathRelationships { /// Offers arbitrary ordering for predictable ordered printing of things keyed by addresses. extension ActorID: Comparable { public static func < (lhs: ActorID, rhs: ActorID) -> Bool { - lhs.uniqueNode < rhs.uniqueNode || - (lhs.uniqueNode == rhs.uniqueNode && lhs.path < rhs.path) || - (lhs.uniqueNode == rhs.uniqueNode && lhs.path == rhs.path && lhs.incarnation < rhs.incarnation) + lhs.node < rhs.node || + (lhs.node == rhs.node && lhs.path < rhs.path) || + (lhs.node == rhs.node && lhs.path == rhs.path && lhs.incarnation < rhs.incarnation) } } -extension Optional: Comparable where Wrapped == UniqueNode { - public static func < (lhs: UniqueNode?, rhs: UniqueNode?) -> Bool { +extension Optional: Comparable where Wrapped == Cluster.Node { + public static func < (lhs: Cluster.Node?, rhs: Cluster.Node?) 
-> Bool { switch (lhs, rhs) { case (.some, .none): return false @@ -478,8 +478,8 @@ extension Optional: Comparable where Wrapped == UniqueNode { @usableFromInline internal enum ActorLocation: Hashable, Sendable { - case local(UniqueNode) - case remote(UniqueNode) + case local(Cluster.Node) + case remote(Cluster.Node) } // ==== ---------------------------------------------------------------------------------------------------------------- @@ -577,11 +577,11 @@ extension ActorPath { public static let _user: ActorPath = try! ActorPath(root: "user") public static let _system: ActorPath = try! ActorPath(root: "system") - internal func makeLocalID(on node: UniqueNode, incarnation: ActorIncarnation) -> ActorID { + internal func makeLocalID(on node: Cluster.Node, incarnation: ActorIncarnation) -> ActorID { ActorID(local: node, path: self, incarnation: incarnation) } - internal func makeRemoteID(on node: UniqueNode, incarnation: ActorIncarnation) -> ActorID { + internal func makeRemoteID(on node: Cluster.Node, incarnation: ActorIncarnation) -> ActorID { ActorID(remote: node, path: self, incarnation: incarnation) } } @@ -790,181 +790,6 @@ extension ActorIncarnation: Comparable { } } -// ==== ---------------------------------------------------------------------------------------------------------------- -// MARK: Node - -// TODO: Would want to rename; this is really protocol + host + port, and a "cute name for humans" we on purpose do not take the name as part or identity -/// A `Node` is a triplet of protocol, host and port that a node is bound to. -/// -/// Unlike `UniqueNode`, it does not carry identity (`NodeID`) of a specific incarnation of an actor system node, -/// and represents an address of _any_ node that could live under this address. During the handshake process between two nodes, -/// the remote `Node` that the local side started out to connect with is "upgraded" to a `UniqueNode`, as soon as we discover -/// the remote side's unique node identifier (`NodeID`). -/// -/// ### System name / human readable name -/// The `systemName` is NOT taken into account when comparing nodes. The system name is only utilized for human readability -/// and debugging purposes and participates neither in hashcode nor equality of a `Node`, as a node specifically is meant -/// to represent any unique node that can live on specific host & port. System names are useful for human operators, -/// intending to use some form of naming scheme, e.g. adopted from a cloud provider, to make it easier to map nodes in -/// actor system logs, to other external systems. TODO: Note also node roles, which we do not have yet... those are dynamic key/value pairs paired to a unique node. -/// -/// - SeeAlso: For more details on unique node ids, refer to: `UniqueNode`. -public struct Node: Hashable, Sendable { - // TODO: collapse into one String and index into it? - public var `protocol`: String - public var systemName: String // TODO: some other name, to signify "this is just for humans"? 
- public var host: String - public var port: Int - - public init(protocol: String, systemName: String, host: String, port: Int) { - precondition(port > 0, "port MUST be > 0") - self.protocol = `protocol` - self.systemName = systemName - self.host = host - self.port = port - } - - public init(systemName: String, host: String, port: Int) { - self.init(protocol: "sact", systemName: systemName, host: host, port: port) - } - - public init(host: String, port: Int) { - self.init(protocol: "sact", systemName: "", host: host, port: port) - } -} - -extension Node: CustomStringConvertible, CustomDebugStringConvertible { - public var description: String { - "\(self.protocol)://\(self.systemName)@\(self.host):\(self.port)" - } - - public var debugDescription: String { - self.description - } -} - -extension Node: Comparable { - // Silly but good enough comparison for deciding "who is lower node" - // as we only use those for "tie-breakers" any ordering is fine to be honest here. - public static func < (lhs: Node, rhs: Node) -> Bool { - "\(lhs)" < "\(rhs)" - } - - public func hash(into hasher: inout Hasher) { - hasher.combine(self.protocol) - hasher.combine(self.host) - hasher.combine(self.port) - } - - public static func == (lhs: Node, rhs: Node) -> Bool { - lhs.protocol == rhs.protocol && lhs.host == rhs.host && lhs.port == rhs.port - } -} - -// ==== ---------------------------------------------------------------------------------------------------------------- -// MARK: UniqueNode - -/// A _unique_ node which includes also the node's unique `UID` which is used to disambiguate -/// multiple incarnations of a system on the same host/port part -- similar to how an `ActorIncarnation` -/// is used on the per-actor level. -/// -/// ### Implementation details -/// The unique address of a remote node can only be obtained by performing the handshake with it. -/// Once the remote node accepts our handshake, it offers the other node its unique address. -/// Only once this address has been obtained can a node communicate with actors located on the remote node. 
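After this rename the pair documented above is spelled `Cluster.Endpoint` (protocol + host + port, what you configure and join by) and `Cluster.Node` (an endpoint plus the unique ID learned via the handshake). A small usage sketch under that assumption, using only API that appears elsewhere in this diff (`settings.endpoint`, `cluster.join(endpoint:)`, `cluster.node`); system names and ports are placeholders, and shutdown handling is omitted.

```swift
import DistributedCluster

@main
enum EndpointJoinExample {
    static func main() async throws {
        // Configure each system by endpoint (formerly `settings.node`).
        let first = await ClusterSystem("First") { settings in
            settings.enabled = true
            settings.endpoint.port = 7337
        }
        let second = await ClusterSystem("Second") { settings in
            settings.enabled = true
            settings.endpoint.port = 8228
        }

        // Join by Cluster.Endpoint: host and port are all we can know up front...
        second.cluster.join(endpoint: first.cluster.node.endpoint)

        // ...while Cluster.Node (formerly UniqueNode) also carries the unique ID
        // assigned to this particular incarnation of the system.
        print("self node:", second.cluster.node)
        print("joined endpoint:", first.cluster.node.endpoint)
    }
}
```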
-public struct UniqueNode: Hashable, Sendable { - public typealias ID = UniqueNodeID - - public var node: Node - public let nid: UniqueNodeID - - public init(node: Node, nid: UniqueNodeID) { - precondition(node.port > 0, "port MUST be > 0") - self.node = node - self.nid = nid - } - - public init(protocol: String, systemName: String, host: String, port: Int, nid: UniqueNodeID) { - self.init(node: Node(protocol: `protocol`, systemName: systemName, host: host, port: port), nid: nid) - } - - public init(systemName: String, host: String, port: Int, nid: UniqueNodeID) { - self.init(protocol: "sact", systemName: systemName, host: host, port: port, nid: nid) - } - - public var host: String { - set { - self.node.host = newValue - } - get { - self.node.host - } - } - - public var port: Int { - set { - self.node.port = newValue - } - get { - self.node.port - } - } -} - -extension UniqueNode: CustomStringConvertible, CustomDebugStringConvertible { - public var description: String { - "\(self.node)" - } - - public var debugDescription: String { - let a = self.node - return "\(a.protocol)://\(a.systemName):\(self.nid)@\(a.host):\(a.port)" - } -} - -extension UniqueNode: Comparable { - public static func == (lhs: UniqueNode, rhs: UniqueNode) -> Bool { - // we first compare the NodeIDs since they're quicker to compare and for diff systems always would differ, even if on same physical address - lhs.nid == rhs.nid && lhs.node == rhs.node - } - - // Silly but good enough comparison for deciding "who is lower node" - // as we only use those for "tie-breakers" any ordering is fine to be honest here. - public static func < (lhs: UniqueNode, rhs: UniqueNode) -> Bool { - if lhs.node == rhs.node { - return lhs.nid < rhs.nid - } else { - return lhs.node < rhs.node - } - } -} - -public struct UniqueNodeID: Hashable, Sendable { - let value: UInt64 - - public init(_ value: UInt64) { - self.value = value - } -} - -extension UniqueNodeID: Comparable { - public static func < (lhs: UniqueNodeID, rhs: UniqueNodeID) -> Bool { - lhs.value < rhs.value - } -} - -extension UniqueNodeID: CustomStringConvertible { - public var description: String { - "\(self.value)" - } -} - -extension UniqueNodeID { - public static func random() -> UniqueNodeID { - UniqueNodeID(UInt64.random(in: 1 ... .max)) - } -} - // ==== ---------------------------------------------------------------------------------------------------------------- // MARK: Path errors diff --git a/Sources/DistributedCluster/ActorRefProvider.swift b/Sources/DistributedCluster/ActorRefProvider.swift index 5b94ec3dd..6f81b154a 100644 --- a/Sources/DistributedCluster/ActorRefProvider.swift +++ b/Sources/DistributedCluster/ActorRefProvider.swift @@ -47,7 +47,7 @@ internal protocol _ActorRefProvider: _ActorTreeTraversable { // TODO: consider if we need abstraction / does it cost us? 
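The `RemoteActorRefProvider` change below keys local-versus-remote resolution on the renamed `ActorID.node` property: an ID resolves locally only when its `Cluster.Node` equals the node this system bound to (`settings.bindNode`), and otherwise goes through the remote personality. A minimal sketch of that check, assuming the post-rename `ClusterSystem.ActorID`; `describeResolution` is a hypothetical helper, not library API.

```swift
import DistributedCluster

// Hypothetical helper illustrating the decision RemoteActorRefProvider makes below;
// it relies only on `id.node` (formerly `id.uniqueNode`) and Cluster.Node equality,
// which compares the unique node ID as well as the endpoint.
func describeResolution(of id: ClusterSystem.ActorID, boundTo localNode: Cluster.Node) -> String {
    if id.node == localNode {
        return "local actor: \(id)"          // hand off to the local provider
    } else {
        return "remote actor on \(id.node)"  // resolve via the association with that node
    }
}
```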
internal struct RemoteActorRefProvider: _ActorRefProvider { - private let localNode: UniqueNode + private let localNode: Cluster.Node private let localProvider: LocalActorRefProvider let cluster: ClusterShell @@ -61,7 +61,7 @@ internal struct RemoteActorRefProvider: _ActorRefProvider { ) { precondition(settings.enabled, "Remote actor provider should only be used when clustering is enabled") - self.localNode = settings.uniqueBindNode + self.localNode = settings.bindNode self.cluster = cluster self.localProvider = localProvider } @@ -114,7 +114,7 @@ extension RemoteActorRefProvider { } public func _resolveUntyped(context: _ResolveContext) -> _AddressableActorRef { - if self.localNode == context.id.uniqueNode { + if self.localNode == context.id.node { return self.localProvider._resolveUntyped(context: context) } else { return _AddressableActorRef(self._resolveAsRemoteRef(context, remoteAddress: context.id._asRemote)) diff --git a/Sources/DistributedCluster/Clocks/Protobuf/VersionVector+Serialization.swift b/Sources/DistributedCluster/Clocks/Protobuf/VersionVector+Serialization.swift index f033c0e1d..3d5a8d7cf 100644 --- a/Sources/DistributedCluster/Clocks/Protobuf/VersionVector+Serialization.swift +++ b/Sources/DistributedCluster/Clocks/Protobuf/VersionVector+Serialization.swift @@ -27,10 +27,10 @@ extension ReplicaID: _ProtobufRepresentable { proto.actorID = try actorID.toProto(context: context) // case .actorIdentity(let actorIdentity): // proto.actorIdentity = try actorIdentity.toProto(context: context) - case .uniqueNode(let node): - proto.uniqueNode = try node.toProto(context: context) - case .uniqueNodeID(let nid): - proto.uniqueNodeID = nid.value + case .node(let node): + proto.node = try node.toProto(context: context) + case .nodeID(let nid): + proto.nodeID = nid.value } return proto } @@ -47,11 +47,11 @@ extension ReplicaID: _ProtobufRepresentable { // case .actorIdentity(let protoIdentity): // let id = try ClusterSystem.ActorID(fromProto: protoIdentity, context: context) // self = .actorIdentity(id) - case .uniqueNode(let protoNode): - let node = try UniqueNode(fromProto: protoNode, context: context) - self = .uniqueNode(node) - case .uniqueNodeID(let nid): - self = .uniqueNodeID(nid) + case .node(let protoNode): + let node = try Cluster.Node(fromProto: protoNode, context: context) + self = .node(node) + case .nodeID(let nid): + self = .nodeID(nid) } } } @@ -76,7 +76,7 @@ extension VersionVector: _ProtobufRepresentable { return proto } - /// Serialize using uniqueNodeID specifically (or crash); + /// Serialize using nodeID specifically (or crash); /// Used in situations where an enclosing message already has the unique nodes serialized and we can save space by avoiding to serialize them again. public func toCompactReplicaNodeIDProto(context: Serialization.Context) throws -> _ProtoVersionVector { var proto = _ProtoVersionVector() @@ -84,10 +84,10 @@ extension VersionVector: _ProtobufRepresentable { let replicaVersions: [_ProtoReplicaVersion] = try self.state.map { replicaID, version in var replicaVersion = _ProtoReplicaVersion() switch replicaID.storage { - case .uniqueNode(let node): - replicaVersion.replicaID.uniqueNodeID = node.nid.value - case .uniqueNodeID(let nid): - replicaVersion.replicaID.uniqueNodeID = nid.value + case .node(let node): + replicaVersion.replicaID.nodeID = node.nid.value + case .nodeID(let nid): + replicaVersion.replicaID.nodeID = nid.value case .actorID: throw SerializationError(.unableToSerialize(hint: "Can't serialize using actor address as replica id! 
Was: \(replicaID)")) } diff --git a/Sources/DistributedCluster/Clocks/Protobuf/VersionVector.pb.swift b/Sources/DistributedCluster/Clocks/Protobuf/VersionVector.pb.swift index 477940c71..2dc4c0b97 100644 --- a/Sources/DistributedCluster/Clocks/Protobuf/VersionVector.pb.swift +++ b/Sources/DistributedCluster/Clocks/Protobuf/VersionVector.pb.swift @@ -1,5 +1,4 @@ // DO NOT EDIT. -// swift-format-ignore-file // // Generated by the Swift generator plugin for the protocol buffer compiler. // Source: Clocks/VersionVector.proto @@ -27,7 +26,7 @@ import SwiftProtobuf // If the compiler emits an error on this type, it is because this file // was generated by a version of the `protoc` Swift plug-in that is // incompatible with the version of SwiftProtobuf to which you are linking. -// Please ensure that you are building against the same version of the API +// Please ensure that your are building against the same version of the API // that was used to generate this file. fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} @@ -40,21 +39,24 @@ public struct _ProtoActorIdentity { // methods supported on all messages. public var manifest: _ProtoManifest { - get {return _manifest ?? _ProtoManifest()} - set {_manifest = newValue} + get {return _storage._manifest ?? _ProtoManifest()} + set {_uniqueStorage()._manifest = newValue} } /// Returns true if `manifest` has been explicitly set. - public var hasManifest: Bool {return self._manifest != nil} + public var hasManifest: Bool {return _storage._manifest != nil} /// Clears the value of `manifest`. Subsequent reads from it will return its default value. - public mutating func clearManifest() {self._manifest = nil} + public mutating func clearManifest() {_uniqueStorage()._manifest = nil} - public var payload: Data = Data() + public var payload: Data { + get {return _storage._payload} + set {_uniqueStorage()._payload = newValue} + } public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _manifest: _ProtoManifest? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoVersionReplicaID { @@ -62,57 +64,48 @@ public struct _ProtoVersionReplicaID { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - public var value: _ProtoVersionReplicaID.OneOf_Value? = nil + public var value: OneOf_Value? { + get {return _storage._value} + set {_uniqueStorage()._value = newValue} + } public var actorID: _ProtoActorID { get { - if case .actorID(let v)? = value {return v} + if case .actorID(let v)? = _storage._value {return v} return _ProtoActorID() } - set {value = .actorID(newValue)} + set {_uniqueStorage()._value = .actorID(newValue)} } - public var uniqueNode: _ProtoUniqueNode { + public var node: _ProtoClusterNode { get { - if case .uniqueNode(let v)? = value {return v} - return _ProtoUniqueNode() + if case .node(let v)? = _storage._value {return v} + return _ProtoClusterNode() } - set {value = .uniqueNode(newValue)} + set {_uniqueStorage()._value = .node(newValue)} } - public var uniqueNodeID: UInt64 { + public var nodeID: UInt64 { get { - if case .uniqueNodeID(let v)? = value {return v} + if case .nodeID(let v)? 
= _storage._value {return v} return 0 } - set {value = .uniqueNodeID(newValue)} + set {_uniqueStorage()._value = .nodeID(newValue)} } public var unknownFields = SwiftProtobuf.UnknownStorage() public enum OneOf_Value: Equatable { case actorID(_ProtoActorID) - case uniqueNode(_ProtoUniqueNode) - case uniqueNodeID(UInt64) + case node(_ProtoClusterNode) + case nodeID(UInt64) #if !swift(>=4.1) public static func ==(lhs: _ProtoVersionReplicaID.OneOf_Value, rhs: _ProtoVersionReplicaID.OneOf_Value) -> Bool { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch (lhs, rhs) { - case (.actorID, .actorID): return { - guard case .actorID(let l) = lhs, case .actorID(let r) = rhs else { preconditionFailure() } - return l == r - }() - case (.uniqueNode, .uniqueNode): return { - guard case .uniqueNode(let l) = lhs, case .uniqueNode(let r) = rhs else { preconditionFailure() } - return l == r - }() - case (.uniqueNodeID, .uniqueNodeID): return { - guard case .uniqueNodeID(let l) = lhs, case .uniqueNodeID(let r) = rhs else { preconditionFailure() } - return l == r - }() + case (.actorID(let l), .actorID(let r)): return l == r + case (.node(let l), .node(let r)): return l == r + case (.nodeID(let l), .nodeID(let r)): return l == r default: return false } } @@ -120,6 +113,8 @@ public struct _ProtoVersionReplicaID { } public init() {} + + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoReplicaVersion { @@ -128,21 +123,24 @@ public struct _ProtoReplicaVersion { // methods supported on all messages. public var replicaID: _ProtoVersionReplicaID { - get {return _replicaID ?? _ProtoVersionReplicaID()} - set {_replicaID = newValue} + get {return _storage._replicaID ?? _ProtoVersionReplicaID()} + set {_uniqueStorage()._replicaID = newValue} } /// Returns true if `replicaID` has been explicitly set. - public var hasReplicaID: Bool {return self._replicaID != nil} + public var hasReplicaID: Bool {return _storage._replicaID != nil} /// Clears the value of `replicaID`. Subsequent reads from it will return its default value. - public mutating func clearReplicaID() {self._replicaID = nil} + public mutating func clearReplicaID() {_uniqueStorage()._replicaID = nil} - public var version: UInt64 = 0 + public var version: UInt64 { + get {return _storage._version} + set {_uniqueStorage()._version = newValue} + } public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _replicaID: _ProtoVersionReplicaID? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoVersionVector { @@ -164,21 +162,24 @@ public struct _ProtoVersionDot { // methods supported on all messages. public var replicaID: _ProtoVersionReplicaID { - get {return _replicaID ?? _ProtoVersionReplicaID()} - set {_replicaID = newValue} + get {return _storage._replicaID ?? _ProtoVersionReplicaID()} + set {_uniqueStorage()._replicaID = newValue} } /// Returns true if `replicaID` has been explicitly set. - public var hasReplicaID: Bool {return self._replicaID != nil} + public var hasReplicaID: Bool {return _storage._replicaID != nil} /// Clears the value of `replicaID`. Subsequent reads from it will return its default value. 
- public mutating func clearReplicaID() {self._replicaID = nil} + public mutating func clearReplicaID() {_uniqueStorage()._replicaID = nil} - public var version: UInt64 = 0 + public var version: UInt64 { + get {return _storage._version} + set {_uniqueStorage()._version = newValue} + } public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _replicaID: _ProtoVersionReplicaID? = nil + fileprivate var _storage = _StorageClass.defaultInstance } /// A dot and its arbitrary, serialized element @@ -188,32 +189,34 @@ public struct _ProtoVersionDottedElementEnvelope { // methods supported on all messages. public var dot: _ProtoVersionDot { - get {return _dot ?? _ProtoVersionDot()} - set {_dot = newValue} + get {return _storage._dot ?? _ProtoVersionDot()} + set {_uniqueStorage()._dot = newValue} } /// Returns true if `dot` has been explicitly set. - public var hasDot: Bool {return self._dot != nil} + public var hasDot: Bool {return _storage._dot != nil} /// Clears the value of `dot`. Subsequent reads from it will return its default value. - public mutating func clearDot() {self._dot = nil} + public mutating func clearDot() {_uniqueStorage()._dot = nil} /// ~~ element ~~ public var manifest: _ProtoManifest { - get {return _manifest ?? _ProtoManifest()} - set {_manifest = newValue} + get {return _storage._manifest ?? _ProtoManifest()} + set {_uniqueStorage()._manifest = newValue} } /// Returns true if `manifest` has been explicitly set. - public var hasManifest: Bool {return self._manifest != nil} + public var hasManifest: Bool {return _storage._manifest != nil} /// Clears the value of `manifest`. Subsequent reads from it will return its default value. - public mutating func clearManifest() {self._manifest = nil} + public mutating func clearManifest() {_uniqueStorage()._manifest = nil} - public var payload: Data = Data() + public var payload: Data { + get {return _storage._payload} + set {_uniqueStorage()._payload = newValue} + } public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _dot: _ProtoVersionDot? = nil - fileprivate var _manifest: _ProtoManifest? = nil + fileprivate var _storage = _StorageClass.defaultInstance } // MARK: - Code below here is support for the SwiftProtobuf runtime. @@ -225,32 +228,63 @@ extension _ProtoActorIdentity: SwiftProtobuf.Message, SwiftProtobuf._MessageImpl 2: .same(proto: "payload"), ] + fileprivate class _StorageClass { + var _manifest: _ProtoManifest? = nil + var _payload: Data = SwiftProtobuf.Internal.emptyData + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _manifest = source._manifest + _payload = source._payload + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._manifest) }() - case 2: try { try decoder.decodeSingularBytesField(value: &self.payload) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._manifest) + case 2: try decoder.decodeSingularBytesField(value: &_storage._payload) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._manifest { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if !self.payload.isEmpty { - try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._manifest { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if !_storage._payload.isEmpty { + try visitor.visitSingularBytesField(value: _storage._payload, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoActorIdentity, rhs: _ProtoActorIdentity) -> Bool { - if lhs._manifest != rhs._manifest {return false} - if lhs.payload != rhs.payload {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._manifest != rhs_storage._manifest {return false} + if _storage._payload != rhs_storage._payload {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -260,79 +294,86 @@ extension _ProtoVersionReplicaID: SwiftProtobuf.Message, SwiftProtobuf._MessageI public static let protoMessageName: String = "VersionReplicaID" public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "actorID"), - 2: .same(proto: "uniqueNode"), - 3: .same(proto: "uniqueNodeID"), + 2: .same(proto: "node"), + 3: .same(proto: "nodeID"), ] + fileprivate class _StorageClass { + var _value: _ProtoVersionReplicaID.OneOf_Value? + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _value = source._value + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { - var v: _ProtoActorID? - var hadOneofValue = false - if let current = self.value { - hadOneofValue = true - if case .actorID(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.value = .actorID(v) - } - }() - case 2: try { - var v: _ProtoUniqueNode? 
- var hadOneofValue = false - if let current = self.value { - hadOneofValue = true - if case .uniqueNode(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.value = .uniqueNode(v) + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: + var v: _ProtoActorID? + if let current = _storage._value { + try decoder.handleConflictingOneOf() + if case .actorID(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._value = .actorID(v)} + case 2: + var v: _ProtoClusterNode? + if let current = _storage._value { + try decoder.handleConflictingOneOf() + if case .node(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._value = .node(v)} + case 3: + if _storage._value != nil {try decoder.handleConflictingOneOf()} + var v: UInt64? + try decoder.decodeSingularUInt64Field(value: &v) + if let v = v {_storage._value = .nodeID(v)} + default: break } - }() - case 3: try { - var v: UInt64? - try decoder.decodeSingularUInt64Field(value: &v) - if let v = v { - if self.value != nil {try decoder.handleConflictingOneOf()} - self.value = .uniqueNodeID(v) - } - }() - default: break } } } public func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch self.value { - case .actorID?: try { - guard case .actorID(let v)? = self.value else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - }() - case .uniqueNode?: try { - guard case .uniqueNode(let v)? = self.value else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - }() - case .uniqueNodeID?: try { - guard case .uniqueNodeID(let v)? = self.value else { preconditionFailure() } - try visitor.visitSingularUInt64Field(value: v, fieldNumber: 3) - }() - case nil: break + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + switch _storage._value { + case .actorID(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + case .node(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + case .nodeID(let v)?: + try visitor.visitSingularUInt64Field(value: v, fieldNumber: 3) + case nil: break + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoVersionReplicaID, rhs: _ProtoVersionReplicaID) -> Bool { - if lhs.value != rhs.value {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._value != rhs_storage._value {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -345,32 +386,63 @@ extension _ProtoReplicaVersion: SwiftProtobuf.Message, SwiftProtobuf._MessageImp 2: .same(proto: "version"), ] + fileprivate class _StorageClass { + var _replicaID: _ProtoVersionReplicaID? 
= nil + var _version: UInt64 = 0 + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _replicaID = source._replicaID + _version = source._version + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._replicaID) }() - case 2: try { try decoder.decodeSingularUInt64Field(value: &self.version) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._replicaID) + case 2: try decoder.decodeSingularUInt64Field(value: &_storage._version) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._replicaID { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if self.version != 0 { - try visitor.visitSingularUInt64Field(value: self.version, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._replicaID { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if _storage._version != 0 { + try visitor.visitSingularUInt64Field(value: _storage._version, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoReplicaVersion, rhs: _ProtoReplicaVersion) -> Bool { - if lhs._replicaID != rhs._replicaID {return false} - if lhs.version != rhs.version {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._replicaID != rhs_storage._replicaID {return false} + if _storage._version != rhs_storage._version {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -384,11 +456,8 @@ extension _ProtoVersionVector: SwiftProtobuf.Message, SwiftProtobuf._MessageImpl public mutating func decodeMessage(decoder: inout D) throws { while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeRepeatedMessageField(value: &self.state) }() + case 1: try decoder.decodeRepeatedMessageField(value: &self.state) default: break } } @@ -415,32 +484,63 @@ extension _ProtoVersionDot: SwiftProtobuf.Message, SwiftProtobuf._MessageImpleme 2: .same(proto: "version"), ] + fileprivate class _StorageClass { + var _replicaID: _ProtoVersionReplicaID? 
= nil + var _version: UInt64 = 0 + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _replicaID = source._replicaID + _version = source._version + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._replicaID) }() - case 2: try { try decoder.decodeSingularUInt64Field(value: &self.version) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._replicaID) + case 2: try decoder.decodeSingularUInt64Field(value: &_storage._version) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._replicaID { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if self.version != 0 { - try visitor.visitSingularUInt64Field(value: self.version, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._replicaID { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if _storage._version != 0 { + try visitor.visitSingularUInt64Field(value: _storage._version, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoVersionDot, rhs: _ProtoVersionDot) -> Bool { - if lhs._replicaID != rhs._replicaID {return false} - if lhs.version != rhs.version {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._replicaID != rhs_storage._replicaID {return false} + if _storage._version != rhs_storage._version {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -454,37 +554,70 @@ extension _ProtoVersionDottedElementEnvelope: SwiftProtobuf.Message, SwiftProtob 3: .same(proto: "payload"), ] + fileprivate class _StorageClass { + var _dot: _ProtoVersionDot? = nil + var _manifest: _ProtoManifest? = nil + var _payload: Data = SwiftProtobuf.Internal.emptyData + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _dot = source._dot + _manifest = source._manifest + _payload = source._payload + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._dot) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._manifest) }() - case 3: try { try decoder.decodeSingularBytesField(value: &self.payload) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._dot) + case 2: try decoder.decodeSingularMessageField(value: &_storage._manifest) + case 3: try decoder.decodeSingularBytesField(value: &_storage._payload) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._dot { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if let v = self._manifest { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - } - if !self.payload.isEmpty { - try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 3) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._dot { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if let v = _storage._manifest { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } + if !_storage._payload.isEmpty { + try visitor.visitSingularBytesField(value: _storage._payload, fieldNumber: 3) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoVersionDottedElementEnvelope, rhs: _ProtoVersionDottedElementEnvelope) -> Bool { - if lhs._dot != rhs._dot {return false} - if lhs._manifest != rhs._manifest {return false} - if lhs.payload != rhs.payload {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._dot != rhs_storage._dot {return false} + if _storage._manifest != rhs_storage._manifest {return false} + if _storage._payload != rhs_storage._payload {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } diff --git a/Sources/DistributedCluster/Clocks/VersionVector.swift b/Sources/DistributedCluster/Clocks/VersionVector.swift index 31b1c3014..c29ec4e6e 100644 --- a/Sources/DistributedCluster/Clocks/VersionVector.swift +++ b/Sources/DistributedCluster/Clocks/VersionVector.swift @@ -245,8 +245,8 @@ internal struct ReplicaID: Hashable { internal enum Storage: Hashable { case actorID(ActorID) // case actorIdentity(ClusterSystem.ActorID) - case uniqueNode(UniqueNode) - case uniqueNodeID(UniqueNode.ID) + case node(Cluster.Node) + case nodeID(Cluster.Node.ID) var isActorID: Bool { switch self { @@ -262,16 +262,16 @@ internal struct ReplicaID: Hashable { // } // } - var isUniqueNode: Bool { + var isNode: Bool { switch self { - case .uniqueNode: return true + case .node: return true default: return false } } - var isUniqueNodeID: Bool { + var isNodeID: Bool { switch self { - case .uniqueNodeID: return true + case .nodeID: return true default: return false } } @@ -291,28 +291,28 @@ internal struct ReplicaID: Hashable { .init(.actorID(id)) } - static func uniqueNode(_ uniqueNode: UniqueNode) -> ReplicaID { - .init(.uniqueNode(uniqueNode)) + static func node(_ node: Cluster.Node) -> ReplicaID { + .init(.node(node)) } - static 
func uniqueNodeID(_ uniqueNode: UniqueNode) -> ReplicaID { - .init(.uniqueNodeID(uniqueNode.nid)) + static func nodeID(_ node: Cluster.Node) -> ReplicaID { + .init(.nodeID(node.nid)) } - internal static func uniqueNodeID(_ uniqueNodeID: UInt64) -> ReplicaID { - .init(.uniqueNodeID(.init(uniqueNodeID))) + internal static func nodeID(_ nodeID: UInt64) -> ReplicaID { + .init(.nodeID(.init(nodeID))) } - func ensuringNode(_ node: UniqueNode) -> ReplicaID { + func ensuringNode(_ node: Cluster.Node) -> ReplicaID { switch self.storage { case .actorID(let id): return .actorID(id) - case .uniqueNode(let existingNode): + case .node(let existingNode): assert(existingNode.nid == node.nid, "Attempted to ensureNode with non-matching node identifier, was: \(existingNode)], attempted: \(node)") return self - case .uniqueNodeID(let nid): // drops the nid + case .nodeID(let nid): // drops the nid assert(nid == node.nid, "Attempted to ensureNode with non-matching node identifier, was: \(nid)], attempted: \(node)") - return .uniqueNode(node) + return .node(node) } } } @@ -322,10 +322,10 @@ extension ReplicaID: CustomStringConvertible { switch self.storage { case .actorID(let id): return "actor:\(id)" - case .uniqueNode(let node): - return "uniqueNode:\(node)" - case .uniqueNodeID(let nid): - return "uniqueNodeID:\(nid)" + case .node(let node): + return "node:\(node)" + case .nodeID(let nid): + return "nodeID:\(nid)" } } } @@ -335,11 +335,11 @@ extension ReplicaID: Comparable { switch (lhs.storage, rhs.storage) { case (.actorID(let l), .actorID(let r)): return l < r - case (.uniqueNode(let l), .uniqueNode(let r)): + case (.node(let l), .node(let r)): return l < r - case (.uniqueNodeID(let l), .uniqueNodeID(let r)): + case (.nodeID(let l), .nodeID(let r)): return l < r - case (.uniqueNode, _), (.uniqueNodeID, _), (.actorID, _): + case (.node, _), (.nodeID, _), (.actorID, _): return false } } @@ -349,17 +349,17 @@ extension ReplicaID: Comparable { case (.actorID(let l), .actorID(let r)): return l == r - case (.uniqueNode(let l), .uniqueNode(let r)): + case (.node(let l), .node(let r)): return l == r - case (.uniqueNodeID(let l), .uniqueNodeID(let r)): + case (.nodeID(let l), .nodeID(let r)): return l == r - case (.uniqueNode(let l), .uniqueNodeID(let r)): + case (.node(let l), .nodeID(let r)): return l.nid == r - case (.uniqueNodeID(let l), .uniqueNode(let r)): + case (.nodeID(let l), .node(let r)): return l == r.nid - case (.uniqueNode, _), (.uniqueNodeID, _), (.actorID, _): + case (.node, _), (.nodeID, _), (.actorID, _): return false } } @@ -368,8 +368,8 @@ extension ReplicaID: Comparable { extension ReplicaID: Codable { enum DiscriminatorKeys: String, Codable { case actorID = "a" - case uniqueNode = "N" - case uniqueNodeID = "n" + case node = "N" + case nodeID = "n" } enum CodingKeys: CodingKey { @@ -383,10 +383,10 @@ extension ReplicaID: Codable { switch try container.decode(DiscriminatorKeys.self, forKey: ._case) { case .actorID: self = try .actorID(container.decode(ActorID.self, forKey: .value)) - case .uniqueNode: - self = try .uniqueNode(container.decode(UniqueNode.self, forKey: .value)) - case .uniqueNodeID: - self = try .uniqueNodeID(container.decode(UInt64.self, forKey: .value)) + case .node: + self = try .node(container.decode(Cluster.Node.self, forKey: .value)) + case .nodeID: + self = try .nodeID(container.decode(UInt64.self, forKey: .value)) } } @@ -396,11 +396,11 @@ extension ReplicaID: Codable { case .actorID(let address): try container.encode(DiscriminatorKeys.actorID, forKey: ._case) try 
container.encode(address, forKey: .value) - case .uniqueNode(let node): - try container.encode(DiscriminatorKeys.uniqueNode, forKey: ._case) + case .node(let node): + try container.encode(DiscriminatorKeys.node, forKey: ._case) try container.encode(node, forKey: .value) - case .uniqueNodeID(let nid): - try container.encode(DiscriminatorKeys.uniqueNodeID, forKey: ._case) + case .nodeID(let nid): + try container.encode(DiscriminatorKeys.nodeID, forKey: ._case) try container.encode(nid.value, forKey: .value) } } diff --git a/Sources/DistributedCluster/Cluster/Association.swift b/Sources/DistributedCluster/Cluster/Association.swift index 2faf3066a..78d104b4b 100644 --- a/Sources/DistributedCluster/Cluster/Association.swift +++ b/Sources/DistributedCluster/Cluster/Association.swift @@ -28,7 +28,7 @@ import NIO /// /// All interactions with a remote node MUST be driven through an association. /// This is important for example if a remote node is terminated, and another node is brought up on the exact same network `Node` -- -/// thus the need to keep a `UniqueNode` of both "sides" of an association -- we want to inform a remote node about our identity, +/// thus the need to keep a `Cluster.Node` of both "sides" of an association -- we want to inform a remote node about our identity, /// and want to confirm if the remote sending side of an association remains the "same exact node", or if it is a new instance on the same address. /// /// A completed ("associated") `Association` can ONLY be obtained by successfully completing a `HandshakeStateMachine` dance, @@ -53,10 +53,10 @@ final class Association: CustomStringConvertible, @unchecked Sendable { /// The address of this node, that was offered to the remote side for this association /// This matters in case we have multiple "self" addresses; e.g. we bind to one address, but expose another because NAT - let selfNode: UniqueNode - var remoteNode: UniqueNode + let selfNode: Cluster.Node + var remoteNode: Cluster.Node - init(selfNode: UniqueNode, remoteNode: UniqueNode) { + init(selfNode: Cluster.Node, remoteNode: Cluster.Node) { self.selfNode = selfNode self.remoteNode = remoteNode self.lock = Lock() @@ -246,20 +246,20 @@ extension Association { /// Tombstones are slightly lighter than a real association, and are kept for a maximum of `settings.associationTombstoneTTL` TODO: make this setting (!!!) /// before being cleaned up. struct Tombstone: Hashable { - let remoteNode: UniqueNode + let remoteNode: Cluster.Node /// Determines when the Tombstone should be removed from kept tombstones in the ClusterShell. /// End of life of the tombstone is calculated as `now + settings.associationTombstoneTTL`. 
let removalDeadline: ContinuousClock.Instant - init(_ node: UniqueNode, settings: ClusterSystemSettings) { + init(_ node: Cluster.Node, settings: ClusterSystemSettings) { // TODO: if we made system carry system.time we could always count from that point in time with a TimeAmount; require Clock and settings then self.removalDeadline = .fromNow(settings.associationTombstoneTTL) self.remoteNode = node } /// Used to create "any" tombstone, for being able to lookup in Set - init(_ node: UniqueNode) { + init(_ node: Cluster.Node) { self.removalDeadline = .now // ANY value here is ok, we do not use it in hash/equals self.remoteNode = node } diff --git a/Sources/DistributedCluster/Cluster/Cluster+Event.swift b/Sources/DistributedCluster/Cluster/Cluster+Event.swift index 5849f007a..ba74d52b8 100644 --- a/Sources/DistributedCluster/Cluster/Cluster+Event.swift +++ b/Sources/DistributedCluster/Cluster/Cluster+Event.swift @@ -38,8 +38,8 @@ extension Cluster { public internal(set) var member: Member /// The node which the change concerns. - public var node: UniqueNode { - self.member.uniqueNode + public var node: Cluster.Node { + self.member.node } /// Only set if the change is a "replacement", which can happen only if a node joins @@ -80,7 +80,7 @@ extension Cluster { } } - init(node: UniqueNode, previousStatus: MemberStatus?, toStatus: MemberStatus) { + init(node: Cluster.Node, previousStatus: MemberStatus?, toStatus: MemberStatus) { // FIXME: enable these assertions // assertBacktrace( // !(toStatus == .removed && fromStatus != .down), @@ -97,8 +97,8 @@ extension Cluster { /// Use to create a "replacement", when the previousNode and node are different (i.e. they should only differ in ID, not host/port) init(replaced: Member, by newMember: Member) { - assert(replaced.uniqueNode.host == newMember.uniqueNode.host, "Replacement Cluster.MembershipChange should be for same non-unique node; Was: \(replaced), and \(newMember)") - assert(replaced.uniqueNode.port == newMember.uniqueNode.port, "Replacement Cluster.MembershipChange should be for same non-unique node; Was: \(replaced), and \(newMember)") + assert(replaced.node.host == newMember.node.host, "Replacement Cluster.MembershipChange should be for same non-unique node; Was: \(replaced), and \(newMember)") + assert(replaced.node.port == newMember.node.port, "Replacement Cluster.MembershipChange should be for same non-unique node; Was: \(replaced), and \(newMember)") assert(newMember.status != .down, "Attempted to replace a member \(replaced) with a .down member: \(newMember)! This should never happen.") self.replaced = replaced diff --git a/Sources/DistributedCluster/Cluster/Cluster+Member.swift b/Sources/DistributedCluster/Cluster/Cluster+Member.swift index d8192589c..cf02e1e2a 100644 --- a/Sources/DistributedCluster/Cluster/Cluster+Member.swift +++ b/Sources/DistributedCluster/Cluster/Cluster+Member.swift @@ -19,13 +19,13 @@ extension Cluster { /// A `Member` is a node that is participating in a clustered system. /// /// It carries `Cluster.MemberStatus` and reachability information. - /// Its identity is the underlying `UniqueNode`, other fields are not taken into account when comparing members. + /// Its identity is the underlying `Cluster.Node`, other fields are not taken into account when comparing members. public struct Member: Hashable { /// Unique node of this cluster member. - public let uniqueNode: UniqueNode + public let node: Cluster.Node /// Cluster membership status of this member, signifying the logical state it resides in the membership. 
- /// Note, that a node that is reachable may still become `.down`, e.g. by issuing a manual `cluster.down(node:)` command or similar. + /// Note, that a node that is reachable may still become `.down`, e.g. by issuing a manual `cluster.down(endpoint:)` command or similar. public var status: Cluster.MemberStatus /// Reachability signifies the failure detectors assessment about this members "reachability" i.e. if it is responding to health checks or not. @@ -40,16 +40,16 @@ extension Cluster { /// The sequence starts at `1`, and 0 means the node was not moved to up _yet_. public var _upNumber: Int? - public init(node: UniqueNode, status: Cluster.MemberStatus) { - self.uniqueNode = node + public init(node: Cluster.Node, status: Cluster.MemberStatus) { + self.node = node self.status = status self._upNumber = nil self.reachability = .reachable } - internal init(node: UniqueNode, status: Cluster.MemberStatus, upNumber: Int) { + internal init(node: Cluster.Node, status: Cluster.MemberStatus, upNumber: Int) { assert(!status.isJoining, "Node \(node) was \(status) yet was given upNumber: \(upNumber). This is incorrect, as only at-least .up members may have upNumbers!") - self.uniqueNode = node + self.node = node self.status = status self._upNumber = upNumber self.reachability = .reachable @@ -72,11 +72,11 @@ extension Cluster { public var asDownIfNotAlready: Member { switch self.status { case .joining, .up, .leaving: - return Member(node: self.uniqueNode, status: .down) + return Member(node: self.node, status: .down) case .down, .removed: return self case ._PLEASE_DO_NOT_EXHAUSTIVELY_MATCH_THIS_ENUM_NEW_CASES_MIGHT_BE_ADDED_IN_THE_FUTURE: - return Member(node: self.uniqueNode, status: .down) + return Member(node: self.node, status: .down) } } @@ -114,11 +114,11 @@ extension Cluster { extension Cluster.Member: Equatable { public func hash(into hasher: inout Hasher) { - self.uniqueNode.hash(into: &hasher) + self.node.hash(into: &hasher) } public static func == (lhs: Cluster.Member, rhs: Cluster.Member) -> Bool { - lhs.uniqueNode == rhs.uniqueNode + lhs.node == rhs.node } } @@ -140,17 +140,17 @@ extension Cluster.Member { /// An ordering by the members' `node` properties, e.g. 1.1.1.1 is "lower" than 2.2.2.2. /// This ordering somewhat unusual, however always consistent and used to select a leader -- see `LowestReachableMember`. public static let lowestAddressOrdering: (Cluster.Member, Cluster.Member) -> Bool = { l, r in - l.uniqueNode < r.uniqueNode + l.node < r.node } } extension Cluster.Member: CustomStringConvertible, CustomDebugStringConvertible { public var description: String { - "Member(\(self.uniqueNode), status: \(self.status), reachability: \(self.reachability))" + "Member(\(self.node), status: \(self.status), reachability: \(self.reachability))" } public var debugDescription: String { - "Member(\(String(reflecting: self.uniqueNode)), status: \(self.status), reachability: \(self.reachability)\(self._upNumber.map { ", _upNumber: \($0)" } ?? ""))" + "Member(\(String(reflecting: self.node)), status: \(self.status), reachability: \(self.reachability)\(self._upNumber.map { ", _upNumber: \($0)" } ?? 
""))" } } diff --git a/Sources/DistributedCluster/Cluster/Cluster+Membership.swift b/Sources/DistributedCluster/Cluster/Cluster+Membership.swift index 92e3c4cf7..9e6599a7c 100644 --- a/Sources/DistributedCluster/Cluster/Cluster+Membership.swift +++ b/Sources/DistributedCluster/Cluster/Cluster+Membership.swift @@ -24,7 +24,7 @@ extension Cluster { /// Leaving the cluster may be graceful or triggered by a failure detector. /// /// ### Replacement (Unique)Nodes - /// A node (or member) is referred to as a "replacement" if it shares _the same_ protocol+host+address (i.e. ``Node``), + /// A node (or member) is referred to as a "replacement" if it shares _the same_ protocol+host+address (i.e. ``Cluster/Endpoint``), /// with another member; It MAY join "over" an existing node and will immediately cause the previous node to be marked ``Cluster/MemberStatus/down`` /// upon such transition. Such situations can take place when an actor system node is killed and started on the same host+port immediately, /// and attempts to connect to the same cluster as its previous "incarnation". Such situation is called a replacement, and by the assumption @@ -41,17 +41,17 @@ extension Cluster { .init(members: []) } - /// Members MUST be stored `UniqueNode` rather than plain node, since there may exist "replacements" which we need + /// Members MUST be stored `Cluster.Node` rather than plain node, since there may exist "replacements" which we need /// to track gracefully -- in order to tell all other nodes that those nodes are now down/leaving/removed, if a /// node took their place. This makes lookup by `Node` not nice, but realistically, that lookup is quite rare -- only - /// when operator issued moves are induced e.g. "> down 1.1.1.1:3333", since operators do not care about `NodeID` most of the time. - internal var _members: [UniqueNode: Cluster.Member] + /// when operator issued moves are induced e.g. "> down 1.1.1.1:3333", since operators do not care about `Cluster.Node.ID` most of the time. + internal var _members: [Cluster.Node: Cluster.Member] /// Initialize a membership with the given members. public init(members: [Cluster.Member]) { self._members = Dictionary(minimumCapacity: members.count) for member in members { - self._members[member.uniqueNode] = member + self._members[member.node] = member } } @@ -62,19 +62,19 @@ extension Cluster { // ==== ------------------------------------------------------------------------------------------------------------ // MARK: Members - /// Retrieves a `Member` by its `UniqueNode`. + /// Retrieves a `Member` by its `Cluster.Node`. /// /// This operation is guaranteed to return a member if it was added to the membership UNLESS the member has been `.removed` /// and dropped which happens only after an extended period of time. // FIXME: That period of time is not implemented - public func uniqueMember(_ node: UniqueNode) -> Cluster.Member? { + public func uniqueMember(_ node: Cluster.Node) -> Cluster.Member? { self._members[node] } /// Picks "first", in terms of least progressed among its lifecycle member in presence of potentially multiple members /// for a non-unique `Node`. In practice, this happens when an existing node is superseded by a "replacement", and the /// previous node becomes immediately down. - public func member(_ node: Node) -> Cluster.Member? { - self._members.values.sorted(by: Cluster.MemberStatus.lifecycleOrdering).first(where: { $0.uniqueNode.node == node }) + public func member(_ endpoint: Cluster.Endpoint) -> Cluster.Member? 
{ + self._members.values.sorted(by: Cluster.MemberStatus.lifecycleOrdering).first(where: { $0.node.endpoint == endpoint }) } public func youngestMember() -> Cluster.Member? { @@ -167,8 +167,8 @@ extension Cluster { } /// Find specific member, identified by its unique node identity. - public func member(byUniqueNodeID nid: UniqueNode.ID) -> Cluster.Member? { - // TODO: make this O(1) by allowing wrapper type to equality check only on NodeID + public func member(byUniqueNodeID nid: Cluster.Node.ID) -> Cluster.Member? { + // TODO: make this O(1) by allowing wrapper type to equality check only on Cluster.Node.ID self._members.first(where: { $0.key.nid == nid })?.value } @@ -192,11 +192,11 @@ extension Cluster { self._leaderNode.flatMap { self.uniqueMember($0) } } set { - self._leaderNode = newValue?.uniqueNode + self._leaderNode = newValue?.node } } - internal var _leaderNode: UniqueNode? + internal var _leaderNode: Cluster.Node? /// Returns a copy of the membership, though without any leaders assigned. public var leaderless: Cluster.Membership { @@ -207,18 +207,18 @@ extension Cluster { /// Checks if passed in node is the leader (given the current view of the cluster state by this Membership). // TODO: this could take into account roles, if we do them - public func isLeader(_ node: UniqueNode) -> Bool { - self.leader?.uniqueNode == node + public func isLeader(_ node: Cluster.Node) -> Bool { + self.leader?.node == node } /// Checks if passed in node is the leader (given the current view of the cluster state by this Membership). public func isLeader(_ member: Cluster.Member) -> Bool { - self.isLeader(member.uniqueNode) + self.isLeader(member.node) } - /// Checks if the membership contains a member representing this ``UniqueNode``. - func contains(_ uniqueNode: UniqueNode) -> Bool { - self._members[uniqueNode] != nil + /// Checks if the membership contains a member representing this ``Cluster.Node``. 
+ func contains(_ node: Cluster.Node) -> Bool { + self._members[node] != nil } } } @@ -235,7 +235,7 @@ extension Cluster.Membership: Hashable { public func hash(into hasher: inout Hasher) { hasher.combine(self._leaderNode) for member in self._members.values { - hasher.combine(member.uniqueNode) + hasher.combine(member.node) hasher.combine(member.status) hasher.combine(member.reachability) } @@ -250,7 +250,7 @@ extension Cluster.Membership: Hashable { } for (lNode, lMember) in lhs._members { if let rMember = rhs._members[lNode], - lMember.uniqueNode != rMember.uniqueNode || + lMember.node != rMember.node || lMember.status != rMember.status || lMember.reachability != rMember.reachability { @@ -266,8 +266,8 @@ extension Cluster.Membership: CustomStringConvertible, CustomDebugStringConverti /// Pretty multi-line output of a membership, useful for manual inspection public var prettyDescription: String { var res = "leader: \(self.leader, orElse: ".none")" - for member in self._members.values.sorted(by: { $0.uniqueNode.node.port < $1.uniqueNode.node.port }) { - res += "\n \(reflecting: member.uniqueNode) status [\(member.status.rawValue, leftPadTo: Cluster.MemberStatus.maxStrLen)]" + for member in self._members.values.sorted(by: { $0.node.endpoint.port < $1.node.endpoint.port }) { + res += "\n \(reflecting: member.node) status [\(member.status.rawValue, leftPadTo: Cluster.MemberStatus.maxStrLen)]" } return res } @@ -302,7 +302,7 @@ extension Cluster.Membership { if let knownUnique = self.uniqueMember(change.node) { // it is known uniquely, so we just update its status - return self.mark(knownUnique.uniqueNode, as: change.status) + return self.mark(knownUnique.node, as: change.status) } if change.isAtLeast(.leaving) { @@ -311,12 +311,12 @@ extension Cluster.Membership { return nil } - if let previousMember = self.member(change.node.node) { + if let previousMember = self.member(change.node.endpoint) { // we are joining "over" an existing incarnation of a node; causing the existing node to become .down immediately if previousMember.status < .down { - _ = self.mark(previousMember.uniqueNode, as: .down) + _ = self.mark(previousMember.node, as: .down) } else { - _ = self.removeCompletely(previousMember.uniqueNode) // the replacement event will handle the down notifications + _ = self.removeCompletely(previousMember.node) // the replacement event will handle the down notifications } self._members[change.node] = change.member @@ -324,7 +324,7 @@ extension Cluster.Membership { return .init(replaced: previousMember, by: change.member) } else { // node is normally joining - self._members[change.member.uniqueNode] = change.member + self._members[change.member.node] = change.member return change } } @@ -349,11 +349,11 @@ extension Cluster.Membership { // for single node "cluster" we allow becoming the leader myself eagerly (e.g. useful in testing) if self._members.count == 0 { - _ = self.join(wannabeLeader.uniqueNode) + _ = self.join(wannabeLeader.node) } // we soundness check that the wanna-be leader is already a member - guard self._members[wannabeLeader.uniqueNode] != nil else { + guard self._members[wannabeLeader.node] != nil else { throw Cluster.MembershipError(.nonMemberLeaderSelected(self, wannabeLeader: wannabeLeader)) } @@ -375,18 +375,18 @@ extension Cluster.Membership { /// - Returns: the changed member if the change was a transition (unreachable -> reachable, or back), /// or `nil` if the reachability is the same as already known by the membership. 
public mutating func applyReachabilityChange(_ change: Cluster.ReachabilityChange) -> Cluster.Member? { - self.mark(change.member.uniqueNode, reachability: change.member.reachability) + self.mark(change.member.node, reachability: change.member.reachability) } /// Returns the change; e.g. if we replaced a node the change `from` will be populated and perhaps a connection should /// be closed to that now-replaced node, since we have replaced it with a new node. - public mutating func join(_ node: UniqueNode) -> Cluster.MembershipChange? { + public mutating func join(_ node: Cluster.Node) -> Cluster.MembershipChange? { var change = Cluster.MembershipChange(member: Cluster.Member(node: node, status: .joining)) change.previousStatus = nil return self.applyMembershipChange(change) } - public func joining(_ node: UniqueNode) -> Cluster.Membership { + public func joining(_ node: Cluster.Node) -> Cluster.Membership { var membership = self _ = membership.join(node) return membership @@ -397,7 +397,7 @@ extension Cluster.Membership { /// Handles replacement nodes properly, by emitting a "replacement" change, and marking the replaced node as `MemberStatus.down`. /// /// If the membership not aware of this address the update is treated as a no-op. - public mutating func mark(_ node: UniqueNode, as status: Cluster.MemberStatus) -> Cluster.MembershipChange? { + public mutating func mark(_ node: Cluster.Node, as status: Cluster.MemberStatus) -> Cluster.MembershipChange? { if let existingExactMember = self.uniqueMember(node) { guard existingExactMember.status < status else { // this would be a "move backwards" which we do not do; membership only moves forward @@ -409,11 +409,11 @@ extension Cluster.Membership { if status == .up { updatedMember._upNumber = self.youngestMember()?._upNumber ?? 1 } - self._members[existingExactMember.uniqueNode] = updatedMember + self._members[existingExactMember.node] = updatedMember return Cluster.MembershipChange(member: existingExactMember, toStatus: status) - } else if let beingReplacedMember = self.member(node.node) { - // We did not get a member by exact UniqueNode match, but we got one by Node match... + } else if let beingReplacedMember = self.member(node.endpoint) { + // We did not get a member by exact Cluster.Node match, but we got one by Node match... // this means this new node that we are trying to mark is a "replacement" and the `beingReplacedNode` must be .downed! // We do not check the "only move forward rule" as this is a NEW node, and is replacing @@ -421,10 +421,10 @@ extension Cluster.Membership { // This still means that the current `.up` one is very likely down already just that we have not noticed _yet_. // replacement: - let replacedNode = Cluster.Member(node: beingReplacedMember.uniqueNode, status: .down) + let replacedMember = Cluster.Member(node: beingReplacedMember.node, status: .down) let nodeD = Cluster.Member(node: node, status: status) - self._members[replacedNode.uniqueNode] = replacedNode - self._members[nodeD.uniqueNode] = nodeD + self._members[replacedMember.node] = replacedMember + self._members[nodeD.node] = nodeD return Cluster.MembershipChange(replaced: beingReplacedMember, by: nodeD) } else { @@ -436,7 +436,7 @@ extension Cluster.Membership { /// Returns new membership while marking an existing member with the specified status. /// /// If the membership not aware of this node the update is treated as a no-op. 
- public func marking(_ node: UniqueNode, as status: Cluster.MemberStatus) -> Cluster.Membership { + public func marking(_ node: Cluster.Node, as status: Cluster.MemberStatus) -> Cluster.Membership { var membership = self _ = membership.mark(node, as: status) return membership @@ -445,7 +445,7 @@ extension Cluster.Membership { /// Mark node with passed in `reachability` /// /// - Returns: the changed member if the reachability was different than the previously stored one. - public mutating func mark(_ node: UniqueNode, reachability: Cluster.MemberReachability) -> Cluster.Member? { + public mutating func mark(_ node: Cluster.Node, reachability: Cluster.MemberReachability) -> Cluster.Member? { guard var member = self._members.removeValue(forKey: node) else { // no such member return nil @@ -468,7 +468,7 @@ extension Cluster.Membership { /// /// - Warning: When removing nodes from cluster one MUST also prune the seen tables (!) of the gossip. /// Rather than calling this function directly, invoke `Cluster.Gossip.removeMember()` which performs all needed cleanups. - public mutating func removeCompletely(_ node: UniqueNode) -> Cluster.MembershipChange? { + public mutating func removeCompletely(_ node: Cluster.Node) -> Cluster.MembershipChange? { if let member = self._members[node] { self._members.removeValue(forKey: node) return .init(member: member, toStatus: .removed) @@ -478,7 +478,7 @@ extension Cluster.Membership { } /// Returns new membership while removing an existing member, identified by the passed in node. - public func removingCompletely(_ node: UniqueNode) -> Cluster.Membership { + public func removingCompletely(_ node: Cluster.Node) -> Cluster.Membership { var membership = self _ = membership.removeCompletely(node) return membership @@ -513,7 +513,7 @@ extension Cluster.Membership { /// Warning: Leaders are not "merged", they get elected by each node (!). /// /// - Returns: any membership changes that occurred (and have affected the current membership). - public mutating func mergeFrom(incoming: Cluster.Membership, myself: UniqueNode?) -> [Cluster.MembershipChange] { + public mutating func mergeFrom(incoming: Cluster.Membership, myself: Cluster.Node?) -> [Cluster.MembershipChange] { var changes: [Cluster.MembershipChange] = [] // Set of nodes whose members are currently .down, and not present in the incoming gossip. @@ -522,13 +522,13 @@ extension Cluster.Membership { // if any remain in the set, it means they were removed in the incoming membership // since we strongly assume the incoming one is "ahead" (i.e. `self happenedBefore ahead`), // we remove these members and emit .removed changes. 
- var downNodesToRemove: Set = Set(self.members(withStatus: .down).map(\.uniqueNode)) + var downNodesToRemove: Set = Set(self.members(withStatus: .down).map(\.node)) // 1) move forward any existing members or new members according to the `ahead` statuses for incomingMember in incoming._members.values { - downNodesToRemove.remove(incomingMember.uniqueNode) + downNodesToRemove.remove(incomingMember.node) - guard var knownMember = self._members[incomingMember.uniqueNode] else { + guard var knownMember = self._members[incomingMember.node] else { // member NOT known locally ---------------------------------------------------------------------------- // only proceed if the member isn't already on its way out @@ -538,7 +538,7 @@ extension Cluster.Membership { } // it is information about a new member, merge it in - self._members[incomingMember.uniqueNode] = incomingMember + self._members[incomingMember.node] = incomingMember var change = Cluster.MembershipChange(member: incomingMember) change.previousStatus = nil // since "new" @@ -549,9 +549,9 @@ extension Cluster.Membership { // it is a known member ------------------------------------------------------------------------------------ if let change = knownMember.moveForward(to: incomingMember.status) { if change.status.isRemoved { - self._members.removeValue(forKey: incomingMember.uniqueNode) + self._members.removeValue(forKey: incomingMember.node) } else { - self._members[incomingMember.uniqueNode] = knownMember + self._members[incomingMember.node] = knownMember } changes.append(change) } @@ -626,8 +626,8 @@ extension Cluster.Membership { // iterate over the original member set, and remove from the `to` set any seen members for member in from._members.values { - if let toMember = to.uniqueMember(member.uniqueNode) { - to._members.removeValue(forKey: member.uniqueNode) + if let toMember = to.uniqueMember(member.node) { + to._members.removeValue(forKey: member.node) if member.status != toMember.status { entries.append(.init(member: member, toStatus: toMember.status)) } @@ -639,7 +639,7 @@ extension Cluster.Membership { // any remaining `to` members, are new members for member in to._members.values { - entries.append(.init(node: member.uniqueNode, previousStatus: nil, toStatus: member.status)) + entries.append(.init(node: member.node, previousStatus: nil, toStatus: member.status)) } return MembershipDiff(changes: entries) @@ -669,8 +669,8 @@ extension Cluster { public struct MembershipError: Error, CustomStringConvertible { internal enum _MembershipError: CustomPrettyStringConvertible { case nonMemberLeaderSelected(Cluster.Membership, wannabeLeader: Cluster.Member) - case notFound(UniqueNode, in: Cluster.Membership) - case notFoundAny(Node, in: Cluster.Membership) + case notFound(Cluster.Node, in: Cluster.Membership) + case notFoundAny(Cluster.Endpoint, in: Cluster.Membership) case atLeastStatusRequirementNotMet(expectedAtLeast: Cluster.MemberStatus, found: Cluster.Member) case statusRequirementNotMet(expected: Cluster.MemberStatus, found: Cluster.Member) case awaitStatusTimedOut(Duration, Error?) 
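To illustrate how the renamed membership API reads after this change, the following is a minimal sketch and not part of the patch itself; the function name is made up, and it assumes a `Cluster.Node` obtained elsewhere (e.g. `system.cluster.node`), using only operations visible in the hunks above:

    import DistributedCluster

    // Hypothetical illustration of the node/endpoint split after this rename.
    func membershipRenameSketch(_ node: Cluster.Node) {
        var membership = Cluster.Membership.empty

        // A unique node joins as .joining, then is moved "forward" to .up.
        _ = membership.join(node)
        _ = membership.mark(node, as: .up)

        // Lookup by unique node (exact incarnation) vs. by endpoint (any incarnation on that host/port).
        let exact: Cluster.Member? = membership.uniqueMember(node)
        let anyIncarnation: Cluster.Member? = membership.member(node.endpoint)
        _ = (exact, anyIncarnation)
    }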
@@ -688,9 +688,9 @@ extension Cluster { case .notFoundAny(let node, let membership): return "[\(node)] is not a member [\(membership)]" case .atLeastStatusRequirementNotMet(let expectedAtLeastStatus, let foundMember): - return "Expected \(reflecting: foundMember.uniqueNode) to be seen as at-least [\(expectedAtLeastStatus)] but was [\(foundMember.status)]" + return "Expected \(reflecting: foundMember.node) to be seen as at-least [\(expectedAtLeastStatus)] but was [\(foundMember.status)]" case .statusRequirementNotMet(let expectedStatus, let foundMember): - return "Expected \(reflecting: foundMember.uniqueNode) to be seen as [\(expectedStatus)] but was [\(foundMember.status)]" + return "Expected \(reflecting: foundMember.node) to be seen as [\(expectedStatus)] but was [\(foundMember.status)]" case .awaitStatusTimedOut(let duration, let lastError): let lastErrorMessage: String if let error = lastError { diff --git a/Sources/DistributedCluster/Cluster/ClusterControl.swift b/Sources/DistributedCluster/Cluster/ClusterControl.swift index d0c8a8ade..f268a94ef 100644 --- a/Sources/DistributedCluster/Cluster/ClusterControl.swift +++ b/Sources/DistributedCluster/Cluster/ClusterControl.swift @@ -77,13 +77,18 @@ public struct ClusterControl { self.events = eventStream var initialMembership: Cluster.Membership = .empty - _ = initialMembership.join(settings.uniqueBindNode) + _ = initialMembership.join(settings.bindNode) self._membershipSnapshotHolder = ClusterControl.MembershipHolder(membership: initialMembership) } /// The node value representing _this_ node in the cluster. - public var uniqueNode: UniqueNode { - self.settings.uniqueBindNode + public var node: Cluster.Node { + self.settings.bindNode + } + + /// The endpoint value representing _this_ node in the cluster. + public var endpoint: Cluster.Endpoint { + self.node.endpoint } /// Instructs the cluster to join the actor system located listening on the passed in host-port pair. @@ -91,31 +96,31 @@ public struct ClusterControl { /// There is no specific need to "wait until joined" before one can attempt to send to references located on the cluster member, /// as message sends will be buffered until the node associates and joins. public func join(host: String, port: Int) { - self.join(node: Node(systemName: "sact", host: host, port: port)) + self.join(endpoint: Cluster.Endpoint(systemName: "sact", host: host, port: port)) } /// Instructs the cluster to join the actor system located listening on the passed in host-port pair. /// /// There is no specific need to "wait until joined" before one can attempt to send to references located on the cluster member, /// as message sends will be buffered until the node associates and joins. - public func join(node: Node) { - self.ref.tell(.command(.handshakeWith(node))) + public func join(endpoint: Cluster.Endpoint) { + self.ref.tell(.command(.handshakeWith(endpoint))) } - /// Usually not to be used, as having an instance of a `UniqueNode` in hand + /// Usually not to be used, as having an instance of a `Cluster.Node` in hand /// is normally only possible after a handshake with the remote node has completed. /// /// However, in local testing scenarios, where the two nodes are executing in the same process (e.g. in a test), /// this call saves the unwrapping of `cluster.node` into the generic node when joining them. /// /// - Parameter node: The node to be joined by this system. 
- public func join(node: UniqueNode) { - self.join(node: node.node) + public func join(node: Cluster.Node) { + self.join(endpoint: node.endpoint) } /// Gracefully public func leave() { - self.ref.tell(.command(.downCommand(self.uniqueNode.node))) + self.ref.tell(.command(.downCommand(self.node.endpoint))) } /// Mark *any* currently known member as ``Cluster/MemberStatus/down``. @@ -131,8 +136,8 @@ public struct ClusterControl { /// pair however are accepted to join the cluster (though technically this is a newly joining node, not really a "re-join"). /// /// - SeeAlso: `Cluster.MemberStatus` for more discussion about what the `.down` status implies. - public func down(node: Node) { - self.ref.tell(.command(.downCommand(node))) + public func down(endpoint: Cluster.Endpoint) { + self.ref.tell(.command(.downCommand(endpoint))) } /// Mark the passed in `Cluster.Member` as `Cluster.MemberStatus` `.down`. @@ -158,7 +163,7 @@ public struct ClusterControl { /// - Returns `Cluster.Member` for the joined node. @discardableResult public func joined(within: Duration) async throws -> Cluster.Member { - try await self.waitFor(self.uniqueNode, .up, within: within) + try await self.waitFor(self.node, .up, within: within) } /// Wait, within the given duration, until the passed in node has joined the cluster and become ``Cluster/MemberStatus/up``. @@ -169,7 +174,7 @@ public struct ClusterControl { /// /// - Returns `Cluster.Member` for the joined node. @discardableResult - public func joined(node: UniqueNode, within: Duration) async throws -> Cluster.Member { + public func joined(node: Cluster.Node, within: Duration) async throws -> Cluster.Member { try await self.waitFor(node, .up, within: within) } @@ -181,8 +186,8 @@ public struct ClusterControl { /// /// - Returns `Cluster.Member` for the joined node. @discardableResult - public func joined(node: Node, within: Duration) async throws -> Cluster.Member? { - try await self.waitFor(node, .up, within: within) + public func joined(endpoint: Cluster.Endpoint, within: Duration) async throws -> Cluster.Member? { + try await self.waitFor(endpoint, .up, within: within) } /// Wait, within the given duration, for this actor system to be a member of all the nodes' respective cluster and have the specified status. @@ -191,7 +196,7 @@ public struct ClusterControl { /// - nodes: The nodes to be joined by this system. /// - status: The expected member status. /// - within: Duration to wait for. - public func waitFor(_ nodes: some Collection<UniqueNode>, _ status: Cluster.MemberStatus, within: Duration) async throws { + public func waitFor(_ nodes: some Collection<Cluster.Node>, _ status: Cluster.MemberStatus, within: Duration) async throws { try await withThrowingTaskGroup(of: Void.self) { group in for node in nodes { group.addTask { @@ -209,7 +214,7 @@ public struct ClusterControl { /// - nodes: The nodes to be joined by this system. /// - status: The minimum expected member status. /// - within: Duration to wait for. 
- public func waitFor(_ nodes: some Collection<UniqueNode>, atLeast atLeastStatus: Cluster.MemberStatus, within: Duration) async throws { + public func waitFor(_ nodes: some Collection<Cluster.Node>, atLeast atLeastStatus: Cluster.MemberStatus, within: Duration) async throws { try await withThrowingTaskGroup(of: Void.self) { group in for node in nodes { group.addTask { @@ -232,7 +237,7 @@ public struct ClusterControl { /// If the expected status is `.down` or `.removed`, and the node is already known to have been removed from the cluster /// a synthesized `Cluster/MemberStatus/removed` (and `.unreachable`) member is returned. @discardableResult - public func waitFor(_ node: UniqueNode, _ status: Cluster.MemberStatus, within: Duration) async throws -> Cluster.Member { + public func waitFor(_ node: Cluster.Node, _ status: Cluster.MemberStatus, within: Duration) async throws -> Cluster.Member { try await self.waitForMembershipEventually(within: within) { membership in if status == .down || status == .removed { if let cluster = self.cluster, cluster.getExistingAssociationTombstone(with: node) != nil { @@ -266,13 +271,13 @@ public struct ClusterControl { /// If the expected status is `.down` or `.removed`, and the node is already known to have been removed from the cluster /// a synthesized `Cluster/MemberStatus/removed` (and `.unreachable`) member is returned. @discardableResult - public func waitFor(_ node: Node, _ status: Cluster.MemberStatus, within: Duration) async throws -> Cluster.Member? { + public func waitFor(_ endpoint: Cluster.Endpoint, _ status: Cluster.MemberStatus, within: Duration) async throws -> Cluster.Member? { try await self.waitForMembershipEventually(Cluster.Member?.self, within: within) { membership in - guard let foundMember = membership.member(node) else { + guard let foundMember = membership.member(endpoint) else { if status == .down || status == .removed { return nil } - throw Cluster.MembershipError(.notFoundAny(node, in: membership)) + throw Cluster.MembershipError(.notFoundAny(endpoint, in: membership)) } if status != foundMember.status { @@ -293,7 +298,7 @@ public struct ClusterControl { /// If the expected status is at least `.down` or `.removed`, and either a tombstone exists for the node or the associated /// membership is not found, the `Cluster.Member` returned would have `.removed` status and *unreachable*. 
@discardableResult - public func waitFor(_ node: UniqueNode, atLeast atLeastStatus: Cluster.MemberStatus, within: Duration) async throws -> Cluster.Member { + public func waitFor(_ node: Cluster.Node, atLeast atLeastStatus: Cluster.MemberStatus, within: Duration) async throws -> Cluster.Member { try await self.waitForMembershipEventually(within: within) { membership in if atLeastStatus == .down || atLeastStatus == .removed { if let cluster = self.cluster, cluster.getExistingAssociationTombstone(with: node) != nil { diff --git a/Sources/DistributedCluster/Cluster/ClusterEventStream.swift b/Sources/DistributedCluster/Cluster/ClusterEventStream.swift index 3bce41544..29e0c2296 100644 --- a/Sources/DistributedCluster/Cluster/ClusterEventStream.swift +++ b/Sources/DistributedCluster/Cluster/ClusterEventStream.swift @@ -128,7 +128,7 @@ internal distributed actor ClusterEventStreamActor: LifecycleWatch { static var props: _Props { var ps = _Props() - ps._knownActorName = "clustEventStream" + ps._knownActorName = "clusterEventStream" ps._systemActor = true ps._wellKnown = true return ps diff --git a/Sources/DistributedCluster/Cluster/ClusterShell+LeaderActions.swift b/Sources/DistributedCluster/Cluster/ClusterShell+LeaderActions.swift index 5e6b5b931..c153e95ed 100644 --- a/Sources/DistributedCluster/Cluster/ClusterShell+LeaderActions.swift +++ b/Sources/DistributedCluster/Cluster/ClusterShell+LeaderActions.swift @@ -147,7 +147,7 @@ extension ClusterShell { state._latestGossip.incrementOwnerVersion() state.gossiperControl.update(payload: state._latestGossip) - self.terminateAssociation(system, state: &state, memberToRemove.uniqueNode) + self.terminateAssociation(system, state: &state, memberToRemove.node) state.log.info( "Leader removed member: \(memberToRemove), all nodes are certain to have seen it as [.down] before", diff --git a/Sources/DistributedCluster/Cluster/ClusterShell+Logging.swift b/Sources/DistributedCluster/Cluster/ClusterShell+Logging.swift index 522a87ff8..5be557cf0 100644 --- a/Sources/DistributedCluster/Cluster/ClusterShell+Logging.swift +++ b/Sources/DistributedCluster/Cluster/ClusterShell+Logging.swift @@ -42,7 +42,7 @@ extension ClusterShellState { if let level = level { self.log.log( level: level, - "[tracelog:cluster] \(type.description)(\(self.settings.node.port)): \(message)", + "[tracelog:cluster] \(type.description)(\(self.settings.endpoint.port)): \(message)", file: file, function: function, line: line ) } @@ -82,9 +82,9 @@ extension ClusterShell { } enum TraceLogType: CustomStringConvertible { - case send(to: Node) - case receive(from: Node) - case receiveUnique(from: UniqueNode) + case send(to: Cluster.Endpoint) + case receive(from: Cluster.Endpoint) + case receiveUnique(from: Cluster.Node) case gossip(Cluster.MembershipGossip) var description: String { diff --git a/Sources/DistributedCluster/Cluster/ClusterShell.swift b/Sources/DistributedCluster/Cluster/ClusterShell.swift index 241c3e32b..d9b4a7d94 100644 --- a/Sources/DistributedCluster/Cluster/ClusterShell.swift +++ b/Sources/DistributedCluster/Cluster/ClusterShell.swift @@ -35,8 +35,8 @@ internal class ClusterShell { static let gossipID: StringGossipIdentifier = "membership" - private var selfNode: UniqueNode { - self.settings.uniqueBindNode + private var selfNode: Cluster.Node { + self.settings.bindNode } private let settings: ClusterSystemSettings @@ -50,21 +50,21 @@ internal class ClusterShell { /// Used by remote actor refs to obtain associations /// - Protected by: `_associationsLock` - private var 
_associations: [UniqueNode: Association] + private var _associations: [Cluster.Node: Association] /// Node tombstones are kept here in order to avoid attempting to associate if we get a reference to such node, /// which would normally trigger an `ensureAssociated`. /// - Protected by: `_associationsLock` - private var _associationTombstones: [UniqueNode: Association.Tombstone] + private var _associationTombstones: [Cluster.Node: Association.Tombstone] - internal func getAnyExistingAssociation(with node: Node) -> Association? { + internal func getAnyExistingAssociation(with endpoint: Cluster.Endpoint) -> Association? { self._associationsLock.withLock { - // TODO: a bit terrible; perhaps key should be Node and then confirm by UniqueNode? + // TODO: a bit terrible; perhaps key should be Node and then confirm by Cluster.Node? // this used to be separated in the State keeping them by Node and here we kept by unique though that caused other challenges - self._associations.first { $0.key.node == node }?.value + self._associations.first { $0.key.endpoint == endpoint }?.value } } - internal func getExistingAssociationTombstone(with node: UniqueNode) -> Association.Tombstone? { + internal func getExistingAssociationTombstone(with node: Cluster.Node) -> Association.Tombstone? { self._associationsLock.withLock { self._associationTombstones[node] } @@ -72,7 +72,7 @@ internal class ClusterShell { /// Get an existing association or ensure that a new one shall be stored and joining kicked off if the target node was not known yet. /// Safe to concurrently access by privileged internals directly - internal func getEnsureAssociation(with node: UniqueNode, file: String = #filePath, line: UInt = #line) -> StoredAssociationState { + internal func getEnsureAssociation(with node: Cluster.Node, file: String = #filePath, line: UInt = #line) -> StoredAssociationState { self._associationsLock.withLock { if let tombstone = self._associationTombstones[node] { return .tombstone(tombstone) @@ -91,7 +91,7 @@ internal class ClusterShell { } } - internal func getSpecificExistingAssociation(with node: UniqueNode) -> Association? { + internal func getSpecificExistingAssociation(with node: Cluster.Node) -> Association? { self._associationsLock.withLock { self._associations[node] } @@ -109,7 +109,7 @@ internal class ClusterShell { private func completeAssociation(_ associated: ClusterShellState.AssociatedDirective, file: String = #filePath, line: UInt = #line) throws { // 1) Complete and store the association try self._associationsLock.withLockVoid { - let node: UniqueNode = associated.handshake.remoteNode + let node: Cluster.Node = associated.handshake.remoteNode let association = self._associations[node] ?? Association(selfNode: associated.handshake.localNode, remoteNode: node) @@ -128,17 +128,17 @@ internal class ClusterShell { /// Performs all cleanups related to terminating an association: /// - cleans the Shell local Association cache - /// - sets a tombstone for the now-tombstone UniqueNode + /// - sets a tombstone for the now-tombstone Cluster.Node /// - ensures node is at least .down in the Membership /// /// Can be invoked as result of a direct .down being issued, or because of a node replacement happening. 
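// Hypothetical stand-in types (not the library's internals) illustrating the lookup
// order that getEnsureAssociation(with:) and terminateAssociation(...) above rely on:
// a tombstoned Cluster.Node must never be re-associated, so the tombstone check wins.
import DistributedCluster

enum StoredAssociation {
    case association(Int)   // placeholder standing in for the real Association
    case tombstone
}

struct AssociationTable {
    private var associations: [Cluster.Node: Int] = [:]
    private var tombstones: Set<Cluster.Node> = []
    private var nextID = 0

    mutating func ensureAssociation(with node: Cluster.Node) -> StoredAssociation {
        if tombstones.contains(node) {
            return .tombstone                     // terminated peers stay terminated
        }
        if let existing = associations[node] {
            return .association(existing)         // reuse the established association
        }
        nextID += 1
        associations[node] = nextID
        return .association(nextID)
    }

    mutating func terminate(_ node: Cluster.Node) {
        associations.removeValue(forKey: node)
        tombstones.insert(node)                   // mirrors storing an Association.Tombstone
    }
}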
- internal func terminateAssociation(_ system: ClusterSystem, state: inout ClusterShellState, _ remoteNode: UniqueNode) { - traceLog_Remote(system.cluster.uniqueNode, "Terminate association with [\(remoteNode)]") + internal func terminateAssociation(_ system: ClusterSystem, state: inout ClusterShellState, _ remoteNode: Cluster.Node) { + traceLog_Remote(system.cluster.node, "Terminate association with [\(remoteNode)]") let removedAssociationOption: Association? = self._associationsLock.withLock { // tombstone the association in the shell immediately. // No more message sends to the system will be possible. - traceLog_Remote(system.cluster.uniqueNode, "Finish terminate association [\(remoteNode)]: Stored tombstone") + traceLog_Remote(system.cluster.node, "Finish terminate association [\(remoteNode)]: Stored tombstone") self._associationTombstones[remoteNode] = Association.Tombstone(remoteNode, settings: system.settings) return self._associations.removeValue(forKey: remoteNode) } @@ -152,7 +152,7 @@ internal class ClusterShell { // notify the failure detector, that we shall assume this node as dead from here on. // it's gossip will also propagate the information through the cluster - traceLog_Remote(system.cluster.uniqueNode, "Finish terminate association [\(remoteNode)]: Notifying SWIM, .confirmDead") + traceLog_Remote(system.cluster.node, "Finish terminate association [\(remoteNode)]: Notifying SWIM, .confirmDead") system.log.warning("Confirm .dead to underlying SWIM, node: \(reflecting: remoteNode)") self._swimShell.confirmDead(node: remoteNode) @@ -183,13 +183,13 @@ internal class ClusterShell { internal static func shootTheOtherNodeAndCloseConnection(system: ClusterSystem, targetNodeAssociation: Association) { let log = system.log let remoteNode = targetNodeAssociation.remoteNode - traceLog_Remote(system.cluster.uniqueNode, "Finish terminate association [\(remoteNode)]: Shooting the other node a direct .gossip to down itself") + traceLog_Remote(system.cluster.node, "Finish terminate association [\(remoteNode)]: Shooting the other node a direct .gossip to down itself") // On purpose use the "raw" RemoteControl to send the message -- this way we avoid the association lookup (it may already be removed), // and directly hit the channel. It is also guaranteed that the message is flushed() before we close it in the next line. let shootTheOtherNodePromise: EventLoopPromise = system._eventLoopGroup.next().makePromise(of: Void.self) - let ripMessage = Payload(payload: .message(ClusterShell.Message.inbound(.restInPeace(remoteNode, from: system.cluster.uniqueNode)))) + let ripMessage = Payload(payload: .message(ClusterShell.Message.inbound(.restInPeace(remoteNode, from: system.cluster.node)))) targetNodeAssociation.sendUserMessage( envelope: ripMessage, recipient: ._clusterShell(on: remoteNode), @@ -226,7 +226,7 @@ internal class ClusterShell { } /// For testing only. - internal func _associatedNodes() -> Set { + internal func _associatedNodes() -> Set { self._associationsLock.withLock { Set(self._associations.keys) } @@ -318,18 +318,18 @@ internal class ClusterShell { // this is basically our API internally for this system enum CommandMessage: _NotActuallyCodableMessage, SilentDeadLetter { - /// Connect and handshake with remote `Node`, obtaining an `UniqueNode` in the process. + /// Connect and handshake with remote `Node`, obtaining an `Cluster.Node` in the process. /// Once the handshake is completed, reply to `replyTo` with the handshake result, and also mark the unique node as `.joining`. 
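// Sketch of the endpoint-vs-node distinction this rename establishes: a Cluster.Endpoint
// is only protocol/host/port coordinates, while a Cluster.Node is an endpoint plus the
// unique ID learned during the handshake. The Cluster.Endpoint initializer and the
// join(endpoint:) spelling are assumptions about the renamed public API, not taken
// verbatim from this patch.
import DistributedCluster

func joinByEndpoint(system: ClusterSystem) async throws {
    // All we know up front is where the peer listens.
    let endpoint = Cluster.Endpoint(systemName: "Peer", host: "127.0.0.1", port: 7337)
    system.cluster.join(endpoint: endpoint)

    // The handshake resolves the endpoint to a concrete Cluster.Node (endpoint + unique ID),
    // which is what membership, associations and tombstones are keyed by afterwards.
    try await system.cluster.waitFor(endpoint, .up, within: .seconds(10))
}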
/// /// If one is present, the underlying failure detector will be asked to monitor this node as well. - case handshakeWith(Node) - case handshakeWithSpecific(UniqueNode) + case handshakeWith(Cluster.Endpoint) + case handshakeWithSpecific(Cluster.Node) case retryHandshake(HandshakeStateMachine.InitiatedState) - case failureDetectorReachabilityChanged(UniqueNode, Cluster.MemberReachability) + case failureDetectorReachabilityChanged(Cluster.Node, Cluster.MemberReachability) /// Used to signal a "down was issued" either by the user, or another part of the system. - case downCommand(Node) // TODO: add reason + case downCommand(Cluster.Endpoint) // TODO: add reason /// Used to signal a "down was issued" either by the user, or another part of the system. case downCommandMember(Cluster.Member) case shutdown(BlockingReceptacle) // TODO: could be NIO future @@ -337,7 +337,7 @@ internal class ClusterShell { } enum QueryMessage: _NotActuallyCodableMessage { - case associatedNodes(_ActorRef>) // TODO: better type here + case associatedNodes(_ActorRef>) // TODO: better type here case currentMembership(_ActorRef) } @@ -345,18 +345,18 @@ internal class ClusterShell { case handshakeOffer(Wire.HandshakeOffer, channel: Channel, handshakeReplyTo: EventLoopPromise) case handshakeAccepted(Wire.HandshakeAccept, channel: Channel) case handshakeRejected(Wire.HandshakeReject) - case handshakeFailed(Node, Error) // TODO: remove? + case handshakeFailed(Cluster.Endpoint, Error) /// This message is used to avoid "zombie nodes" which are known as .down by other nodes, but still stay online. /// It is sent as a best-effort by any node which terminates the connection with this node, e.g. if it knows already /// about this node being `.down` yet it still somehow attempts to communicate with the another node. /// /// Upon receipt, should be interpreted as having to immediately down myself. - case restInPeace(UniqueNode, from: UniqueNode) + case restInPeace(Cluster.Node, from: Cluster.Node) } // TODO: reformulate as Wire.accept / reject? 
internal enum HandshakeResult: Equatable, _NotActuallyCodableMessage { - case success(UniqueNode) + case success(Cluster.Node) case failure(HandshakeStateMachine.HandshakeConnectionError) } @@ -376,7 +376,7 @@ extension ClusterShell { private func bind() -> _Behavior { return .setup { context in // let clusterSettings = context.system.settings - let uniqueBindAddress = self.selfNode + let bindNode = self.selfNode // 1) failure detector (SWIM) // Moved to start method @@ -393,12 +393,12 @@ extension ClusterShell { context.watch(leadership) // if leadership fails for some reason, we are in trouble and need to know about it } - context.log.info("Binding to: [\(uniqueBindAddress)]") + context.log.info("Binding to: [\(bindNode)]") let chanElf = self.bootstrapServerSide( system: context.system, shell: context.myself, - bindAddress: uniqueBindAddress, + bindNode: bindNode, settings: self.settings, serializationPool: self.serializationPool ) @@ -414,7 +414,7 @@ extension ClusterShell { intervalRandomFactor: self.settings.membershipGossipIntervalRandomFactor, style: .acknowledged(timeout: self.settings.membershipGossipInterval), peerDiscovery: .onClusterMember(atLeast: .joining, resolve: { member in - let resolveContext = _ResolveContext.Message>(id: ._clusterGossip(on: member.uniqueNode), system: context.system) + let resolveContext = _ResolveContext.Message>(id: ._clusterGossip(on: member.node), system: context.system) return context.system._resolve(context: resolveContext).asAddressable }) ), @@ -483,8 +483,8 @@ extension ClusterShell { switch command { case .handshakeWith(let node): return self.beginHandshake(context, state, with: node) - case .handshakeWithSpecific(let uniqueNode): - return self.beginHandshake(context, state, with: uniqueNode.node) + case .handshakeWithSpecific(let node): + return self.beginHandshake(context, state, with: node.endpoint) case .retryHandshake(let initiated): return self.retryHandshake(context, state, initiated: initiated) @@ -557,7 +557,7 @@ extension ClusterShell { /// Allows processing in one spot, all membership changes which we may have emitted in other places, due to joining, downing etc. func receiveChangeMembershipRequest(_ context: _ActorContext, event: Cluster.Event) -> _Behavior { - self.tracelog(context, .receive(from: state.selfNode.node), message: event) + self.tracelog(context, .receive(from: state.selfNode.endpoint), message: event) var state = state // 1) IMPORTANT: We MUST apply and act on the incoming event FIRST, before handling the other events. @@ -663,7 +663,7 @@ extension ClusterShell { func tryConfirmDeadToSWIM(_ context: _ActorContext, _ state: ClusterShellState, change: Cluster.MembershipChange) { if change.status.isAtLeast(.down) { - self._swimShell.confirmDead(node: change.member.uniqueNode) + self._swimShell.confirmDead(node: change.member.node) } } @@ -671,14 +671,14 @@ extension ClusterShell { guard change.status < .down else { return } - guard change.member.uniqueNode != state.selfNode else { + guard change.member.node != state.selfNode else { return } // TODO: make it cleaner? though we decided to go with manual peer management as the ClusterShell owns it, hm // TODO: consider receptionist instead of this; we're "early" but receptionist could already be spreading its info to this node, since we associated. 
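// Illustrative consumer of the public cluster event stream, in the same spirit as the
// .onClusterMember(atLeast: .joining, resolve:) peer discovery wired up above: watch
// membership changes and react once a member becomes .up. The handler body is only an example.
import DistributedCluster

func logUpMembers(system: ClusterSystem) async {
    for await event in system.cluster.events {
        guard case .membershipChange(let change) = event, change.status == .up else {
            continue
        }
        system.log.info("Member \(change.member.node) is now .up")
    }
}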
let gossipPeer: GossipShell.Ref = context.system._resolve( - context: .init(id: ._clusterGossip(on: change.member.uniqueNode), system: context.system) + context: .init(id: ._clusterGossip(on: change.member.node), system: context.system) ) // FIXME: make sure that if the peer terminated, we don't add it again in here, receptionist would be better then to power this... // today it can happen that a node goes down but we dont know yet so we add it again :O @@ -694,15 +694,15 @@ extension ClusterShell { /// Upon successful handshake, the `replyTo` actor shall be notified with its result, as well as the handshaked-with node shall be marked as `.joining`. /// /// Handshakes are currently not performed concurrently but one by one. - func beginHandshake(_ context: _ActorContext, _ state: ClusterShellState, with remoteNode: Node) -> _Behavior { + func beginHandshake(_ context: _ActorContext, _ state: ClusterShellState, with remoteNode: Cluster.Endpoint) -> _Behavior { var state = state - guard remoteNode != state.selfNode.node else { + guard remoteNode != state.selfNode.endpoint else { state.log.debug("Ignoring attempt to handshake with myself; Could have been issued as confused attempt to handshake as induced by discovery via gossip?") return .same } - // if an association exists for any UniqueNode that this Node represents, we can use this and abort the handshake dance here + // if an association exists for any Cluster.Node that this Node represents, we can use this and abort the handshake dance here if let existingAssociation = self.getAnyExistingAssociation(with: remoteNode) { state.log.debug("Association already allocated for remote: \(reflecting: remoteNode), existing association: [\(existingAssociation)]") switch existingAssociation.state { @@ -738,7 +738,7 @@ extension ClusterShell { } func retryHandshake(_ context: _ActorContext, _ state: ClusterShellState, initiated: HandshakeStateMachine.InitiatedState) -> _Behavior { - state.log.debug("Retry handshake with: \(initiated.remoteNode)") + state.log.debug("Retry handshake with: \(initiated.remoteEndpoint)") // // // FIXME: this needs more work... 
// let assoc = self.getRetryAssociation(with: initiated.remoteNode) @@ -749,16 +749,16 @@ extension ClusterShell { func connectSendHandshakeOffer(_ context: _ActorContext, _ state: ClusterShellState, initiated: HandshakeStateMachine.InitiatedState) -> _Behavior { var state = state state.log.debug("Extending handshake offer", metadata: [ - "handshake/remoteNode": "\(initiated.remoteNode)", + "handshake/remoteNode": "\(initiated.remoteEndpoint)", ]) let offer: Wire.HandshakeOffer = initiated.makeOffer() - self.tracelog(context, .send(to: initiated.remoteNode), message: offer) + self.tracelog(context, .send(to: initiated.remoteEndpoint), message: offer) let outboundChanElf: EventLoopFuture = self.bootstrapClientSide( system: context.system, shell: context.myself, - targetNode: initiated.remoteNode, + targetNode: initiated.remoteEndpoint, handshakeOffer: offer, settings: state.settings, serializationPool: self.serializationPool @@ -771,7 +771,7 @@ extension ClusterShell { return self.ready(state: state.onHandshakeChannelConnected(initiated: initiated, channel: chan)) case .failure(let error): - return self.onOutboundConnectionError(context, state, with: initiated.remoteNode, error: error) + return self.onOutboundConnectionError(context, state, with: initiated.remoteEndpoint, error: error) } } } @@ -842,7 +842,7 @@ extension ClusterShell { let accept = handshakeCompleted.makeAccept(whenHandshakeReplySent: nil) // 2) Only now we can succeed the accept promise (as the old one has been terminated and cleared) - self.tracelog(context, .send(to: offer.originNode.node), message: accept) + self.tracelog(context, .send(to: offer.originNode.endpoint), message: accept) handshakePromise.succeed(.accept(accept)) // 3) Complete and store the association, we are now ready to flush writes onto the network @@ -898,7 +898,7 @@ extension ClusterShell { let reject: Wire.HandshakeReject = rejectedHandshake.makeReject(whenHandshakeReplySent: { () in self.terminateAssociation(context.system, state: &state, rejectedHandshake.remoteNode) }) - self.tracelog(context, .send(to: offer.originNode.node), message: reject) + self.tracelog(context, .send(to: offer.originNode.endpoint), message: reject) handshakePromise.succeed(.reject(reject)) return self.ready(state: state) @@ -917,7 +917,7 @@ extension ClusterShell { // MARK: Failures to obtain connections extension ClusterShell { - func onOutboundConnectionError(_ context: _ActorContext, _ state: ClusterShellState, with remoteNode: Node, error: Error) -> _Behavior { + func onOutboundConnectionError(_ context: _ActorContext, _ state: ClusterShellState, with remoteNode: Cluster.Endpoint, error: Error) -> _Behavior { var state = state state.log.debug("Failed to establish outbound channel to \(remoteNode), error: \(error)", metadata: [ "handshake/remoteNode": "\(remoteNode)", @@ -941,7 +941,7 @@ extension ClusterShell { switch initiated.onConnectionError(error) { case .scheduleRetryHandshake(let retryDelay): state.log.debug("Schedule handshake retry", metadata: [ - "handshake/remoteNote": "\(initiated.remoteNode)", + "handshake/remoteNote": "\(initiated.remoteEndpoint)", "handshake/retryDelay": "\(retryDelay)", ]) context.timers.startSingle( @@ -1062,7 +1062,7 @@ extension ClusterShell { state.log.info("Accepted handshake from [\(reflecting: directive.handshake.remoteNode)] which replaces the previously known: [\(reflecting: replacedMember)].") // We MUST be careful to first terminate the association and then store the new one in 2) - self.terminateAssociation(context.system, 
state: &state, replacedMember.uniqueNode) + self.terminateAssociation(context.system, state: &state, replacedMember.node) // we want to update the snapshot before the events are published context.system.cluster.updateMembershipSnapshot(state.membership) @@ -1089,60 +1089,60 @@ extension ClusterShell { { state.log.warning( "Handshake rejected by [\(reject.targetNode)], it was associating and is now tombstoned", - metadata: state.metadataForHandshakes(uniqueNode: reject.targetNode, error: nil) + metadata: state.metadataForHandshakes(node: reject.targetNode, error: nil) ) self.terminateAssociation(context.system, state: &state, reject.targetNode) return self.ready(state: state) } - if let existingAssociation = self.getAnyExistingAssociation(with: reject.targetNode.node), + if let existingAssociation = self.getAnyExistingAssociation(with: reject.targetNode.endpoint), existingAssociation.isAssociated || existingAssociation.isTombstone { state.log.debug( "Handshake rejected by [\(reject.targetNode)], however existing association with node exists. Could be that a concurrent handshake was failed on purpose.", - metadata: state.metadataForHandshakes(uniqueNode: reject.targetNode, error: nil) + metadata: state.metadataForHandshakes(node: reject.targetNode, error: nil) ) return .same } state.log.warning( "Handshake rejected by [\(reject.targetNode)], reason: \(reject.reason)", - metadata: state.metadataForHandshakes(uniqueNode: reject.targetNode, error: nil) + metadata: state.metadataForHandshakes(node: reject.targetNode, error: nil) ) // FIXME: don't retry on rejections; those are final; just failures are not, clarify this return .same } - private func onHandshakeFailed(_ context: _ActorContext, _ state: ClusterShellState, with node: Node, error: Error) -> _Behavior { + private func onHandshakeFailed(_ context: _ActorContext, _ state: ClusterShellState, with endpoint: Cluster.Endpoint, error: Error) -> _Behavior { // we MAY be seeing a handshake failure from a 2 nodes concurrently shaking hands on 2 connections, // and we decided to tie-break and kill one of the connections. As such, the handshake COMPLETED successfully but // on the other connection; and the terminated one may yield an error (e.g. truncation error during proto parsing etc), // however that error is harmless - as we associated with the "other" right connection. - if let existingAssociation = self.getAnyExistingAssociation(with: node), + if let existingAssociation = self.getAnyExistingAssociation(with: endpoint), existingAssociation.isAssociated || existingAssociation.isTombstone { state.log.debug( "Handshake failed, however existing association with node exists. 
Could be that a concurrent handshake was failed on purpose.", - metadata: state.metadataForHandshakes(node: node, error: error) + metadata: state.metadataForHandshakes(endpoint: endpoint, error: error) ) return .same } - guard state.handshakeInProgress(with: node) != nil else { + guard state.handshakeInProgress(with: endpoint) != nil else { state.log.debug("Received handshake failed notification, however handshake is not in progress, error: \(message: error)", metadata: [ - "handshake/node": "\(node)", + "handshake/node": "\(endpoint)", ]) return .same } // TODO: tweak logging some more, this is actually not scary in racy handshakes; so it may happen often - state.log.warning("Handshake error while connecting [\(node)]: \(error)", metadata: state.metadataForHandshakes(node: node, error: error)) + state.log.warning("Handshake error while connecting [\(endpoint)]: \(error)", metadata: state.metadataForHandshakes(endpoint: endpoint, error: error)) return .same } - private func onRestInPeace(_ context: _ActorContext, _ state: ClusterShellState, intendedNode: UniqueNode, fromNode: UniqueNode) -> _Behavior { + private func onRestInPeace(_ context: _ActorContext, _ state: ClusterShellState, intendedNode: Cluster.Node, fromNode: Cluster.Node) -> _Behavior { let myselfNode = state.selfNode guard myselfNode == myselfNode else { @@ -1181,7 +1181,7 @@ extension ClusterShell { return self.ready(state: self.onDownCommand(context, state: state, member: myselfMember)) } -// private func notifyHandshakeFailure(state: HandshakeStateMachine.State, node: Node, error: Error) { +// private func notifyHandshakeFailure(state: HandshakeStateMachine.State, node: Cluster.Node, error: Error) { // switch state { // case .initiated(let initiated): // initiated.whenCompleted.fail(HandshakeConnectionError(node: node, message: "\(error)")) @@ -1205,7 +1205,7 @@ extension ClusterShell { context.unwatch(ref) } - let addrDesc = "\(state.settings.uniqueBindNode.node.host):\(state.settings.uniqueBindNode.node.port)" + let addrDesc = "\(state.settings.bindNode.endpoint.host):\(state.settings.bindNode.endpoint.port)" return context.awaitResult(of: state.channel.close(), timeout: context.system.settings.unbindTimeout) { // FIXME: also close all associations (!!!) 
switch $0 { @@ -1271,9 +1271,9 @@ extension ClusterShell { } // whenever we down a node we must ensure to confirm it to swim, so it won't keep monitoring it forever needlessly - self._swimShell.confirmDead(node: memberToDown.uniqueNode) + self._swimShell.confirmDead(node: memberToDown.node) - if memberToDown.uniqueNode == state.selfNode { + if memberToDown.node == state.selfNode { // ==== ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // Down(self node); ensuring SWIM knows about this and should likely initiate graceful shutdown context.log.warning( @@ -1302,7 +1302,7 @@ extension ClusterShell { // Terminate association and Down the (other) node state = self.interpretLeaderActions(context.system, state, state.collectLeaderActions()) - self.terminateAssociation(context.system, state: &state, memberToDown.uniqueNode) + self.terminateAssociation(context.system, state: &state, memberToDown.node) return state } } @@ -1312,13 +1312,13 @@ extension ClusterShell { // MARK: ClusterShell's actor address extension ActorID { - static func _clusterShell(on node: UniqueNode) -> ActorID { + static func _clusterShell(on node: Cluster.Node) -> ActorID { let id = ActorPath._clusterShell.makeRemoteID(on: node, incarnation: .wellKnown) // id.metadata.wellKnown = "$cluster" return id } - static func _clusterGossip(on node: UniqueNode) -> ActorID { + static func _clusterGossip(on node: Cluster.Node) -> ActorID { let id = ActorPath._clusterGossip.makeRemoteID(on: node, incarnation: .wellKnown) // id.metadata.wellKnown = "$gossip" return id diff --git a/Sources/DistributedCluster/Cluster/ClusterShellState.swift b/Sources/DistributedCluster/Cluster/ClusterShellState.swift index d4de9cfcf..79e7f206c 100644 --- a/Sources/DistributedCluster/Cluster/ClusterShellState.swift +++ b/Sources/DistributedCluster/Cluster/ClusterShellState.swift @@ -28,7 +28,7 @@ internal protocol ReadOnlyClusterState { var handshakeBackoff: BackoffStrategy { get } /// Unique address of the current node. - var selfNode: UniqueNode { get } + var selfNode: Cluster.Node { get } var selfMember: Cluster.Member { get } var settings: ClusterSystemSettings { get } @@ -46,7 +46,7 @@ internal struct ClusterShellState: ReadOnlyClusterState { let channel: Channel - let selfNode: UniqueNode + let selfNode: Cluster.Node var selfMember: Cluster.Member { if let member = self.membership.uniqueMember(self.selfNode) { return member @@ -68,7 +68,7 @@ internal struct ClusterShellState: ReadOnlyClusterState { let allocator: ByteBufferAllocator - var _handshakes: [Node: HandshakeStateMachine.State] = [:] + var _handshakes: [Cluster.Endpoint: HandshakeStateMachine.State] = [:] let gossiperControl: GossiperControl @@ -119,8 +119,8 @@ internal struct ClusterShellState: ReadOnlyClusterState { self.allocator = settings.allocator self.eventLoopGroup = settings.eventLoopGroup ?? settings.makeDefaultEventLoopGroup() - self.selfNode = settings.uniqueBindNode - self._latestGossip = Cluster.MembershipGossip(ownerNode: settings.uniqueBindNode) + self.selfNode = settings.bindNode + self._latestGossip = Cluster.MembershipGossip(ownerNode: settings.bindNode) self.events = events self.gossiperControl = gossiperControl @@ -139,7 +139,7 @@ extension ClusterShellState { /// /// This MAY return `inFlight`, in which case it means someone already initiated a handshake with given node, /// and we should _do nothing_ and trust that our `whenCompleted` will be notified when the already in-flight handshake completes. 
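// Hypothetical reduction (local stand-in types) of the initHandshake(with:) contract
// documented above: at most one handshake per Cluster.Endpoint is in flight, and a second
// attempt while one is ongoing reports `inFlight` instead of opening another connection.
import DistributedCluster

enum HandshakeInit {
    case initiated   // caller should connect and send an offer
    case inFlight    // someone already started; do nothing and wait for its completion
}

struct HandshakeBook {
    private var inProgress: Set<Cluster.Endpoint> = []

    mutating func initHandshake(with endpoint: Cluster.Endpoint) -> HandshakeInit {
        if inProgress.contains(endpoint) {
            return .inFlight
        }
        inProgress.insert(endpoint)
        return .initiated
    }

    mutating func complete(_ endpoint: Cluster.Endpoint) {
        inProgress.remove(endpoint)   // mirrors removing the entry from _handshakes
    }
}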
- mutating func initHandshake(with remoteNode: Node) -> HandshakeStateMachine.State { + mutating func initHandshake(with remoteNode: Cluster.Endpoint) -> HandshakeStateMachine.State { if let handshakeState = self.handshakeInProgress(with: remoteNode) { switch handshakeState { case .initiated: @@ -165,10 +165,10 @@ extension ClusterShellState { mutating func onHandshakeChannelConnected(initiated: HandshakeStateMachine.InitiatedState, channel: Channel) -> ClusterShellState { #if DEBUG - let handshakeInProgress: HandshakeStateMachine.State? = self.handshakeInProgress(with: initiated.remoteNode) + let handshakeInProgress: HandshakeStateMachine.State? = self.handshakeInProgress(with: initiated.remoteEndpoint) if case .some(.initiated(let existingInitiated)) = handshakeInProgress { - if existingInitiated.remoteNode != initiated.remoteNode { + if existingInitiated.remoteEndpoint != initiated.remoteEndpoint { fatalError( """ onHandshakeChannelConnected MUST be called with the existing ongoing initiated \ @@ -184,12 +184,12 @@ extension ClusterShellState { var initiated = initiated initiated.onConnectionEstablished(channel: channel) - self._handshakes[initiated.remoteNode] = .initiated(initiated) + self._handshakes[initiated.remoteEndpoint] = .initiated(initiated) return self } - func handshakeInProgress(with node: Node) -> HandshakeStateMachine.State? { - self._handshakes[node] + func handshakeInProgress(with endpoint: Cluster.Endpoint) -> HandshakeStateMachine.State? { + self._handshakes[endpoint] } /// Abort a handshake, clearing any of its state as well as closing the passed in channel @@ -197,8 +197,8 @@ extension ClusterShellState { /// /// - Faults: when called in wrong state of an ongoing handshake /// - Returns: if present, the (now removed) handshake state that was aborted, hil otherwise. - mutating func closeOutboundHandshakeChannel(with node: Node) -> HandshakeStateMachine.State? { - guard let state = self._handshakes.removeValue(forKey: node) else { + mutating func closeOutboundHandshakeChannel(with endpoint: Cluster.Endpoint) -> HandshakeStateMachine.State? { + guard let state = self._handshakes.removeValue(forKey: endpoint) else { return nil } switch state { @@ -257,7 +257,7 @@ extension ClusterShellState { mutating func onIncomingHandshakeOffer(offer: Wire.HandshakeOffer, existingAssociation: Association?, incomingChannel: Channel) -> OnIncomingHandshakeOfferDirective { func prepareNegotiation0() -> OnIncomingHandshakeOfferDirective { let fsm = HandshakeStateMachine.HandshakeOfferReceivedState(state: self, offer: offer) - self._handshakes[offer.originNode.node] = .wasOfferedHandshake(fsm) + self._handshakes[offer.originNode.endpoint] = .wasOfferedHandshake(fsm) return .negotiateIncoming(fsm) } @@ -267,20 +267,20 @@ extension ClusterShellState { () // continue, we'll perform the tie-breaker logic below case .associated: let error = HandshakeStateMachine.HandshakeConnectionError( - node: offer.originNode.node, + endpoint: offer.originNode.endpoint, message: "Terminating this connection, the node [\(offer.originNode)] is already associated. Possibly a delayed handshake retry message was delivered?" ) return .abortIncomingHandshake(error) case .tombstone: let error = HandshakeStateMachine.HandshakeConnectionError( - node: offer.originNode.node, + endpoint: offer.originNode.endpoint, message: "Terminating this connection, the node [\(offer.originNode)] is already tombstone-ed. Possibly a delayed handshake retry message was delivered?" 
) return .abortIncomingHandshake(error) } } - guard let inProgress = self._handshakes[offer.originNode.node] else { + guard let inProgress = self._handshakes[offer.originNode.endpoint] else { // no other concurrent handshakes in progress; good, this is happy path, so we simply continue our negotiation return prepareNegotiation0() } @@ -305,7 +305,7 @@ extension ClusterShellState { ]) if tieBreakWinner { - if self.closeOutboundHandshakeChannel(with: offer.originNode.node) != nil { + if self.closeOutboundHandshakeChannel(with: offer.originNode.endpoint) != nil { self.log.debug( "Aborted handshake, as concurrently negotiating another one with same node already", metadata: [ @@ -321,7 +321,7 @@ extension ClusterShellState { // we "lost", the other node will send the accept; when it does, the will complete the future. // concurrent handshake and we should abort let error = HandshakeStateMachine.HandshakeConnectionError( - node: offer.originNode.node, + endpoint: offer.originNode.endpoint, message: """ Terminating this connection, as there is a concurrently established connection with same host [\(offer.originNode)] \ which will be used to complete the handshake. @@ -352,7 +352,7 @@ extension ClusterShellState { } mutating func incomingHandshakeAccept(_ accept: Wire.HandshakeAccept) -> HandshakeStateMachine.CompletedState? { - guard let inProgressHandshake = self._handshakes[accept.targetNode.node] else { + guard let inProgressHandshake = self._handshakes[accept.targetNode.endpoint] else { self.log.warning("Attempted to accept incoming [\(accept)] for handshake which was not in progress!", metadata: [ "clusterShell": "\(self)", "membership": "\(self.membership)", @@ -385,13 +385,13 @@ extension ClusterShellState { _ clusterShell: ClusterShell, _ handshake: HandshakeStateMachine.CompletedState, channel: Channel, file: String = #filePath, line: UInt = #line ) -> AssociatedDirective { - guard self._handshakes.removeValue(forKey: handshake.remoteNode.node) != nil else { + guard self._handshakes.removeValue(forKey: handshake.remoteNode.endpoint) != nil else { fatalError("Can not complete a handshake which was not in progress!") // TODO: perhaps we instead just warn and ignore this; since it should be harmless } let change: Cluster.MembershipChange? - if let replacedMember = self.membership.member(handshake.remoteNode.node) { + if let replacedMember = self.membership.member(handshake.remoteNode.endpoint) { change = self.membership.applyMembershipChange(Cluster.MembershipChange(replaced: replacedMember, by: Cluster.Member(node: handshake.remoteNode, status: .joining))) } else { change = self.membership.applyMembershipChange(Cluster.MembershipChange(member: Cluster.Member(node: handshake.remoteNode, status: .joining))) @@ -485,7 +485,7 @@ extension ClusterShellState { ] } - func metadataForHandshakes(node: Node? = nil, uniqueNode: UniqueNode? = nil, error err: Error?) -> Logger.Metadata { + func metadataForHandshakes(endpoint: Cluster.Endpoint? = nil, node: Cluster.Node? = nil, error err: Error?) 
-> Logger.Metadata { var metadata: Logger.Metadata = [ "handshakes": "\(self.handshakes())", @@ -494,7 +494,7 @@ extension ClusterShellState { if let n = node { metadata["handshake/peer"] = "\(n)" } - if let n = uniqueNode { + if let n = node { metadata["handshake/peer"] = "\(n)" } if let error = err { diff --git a/Sources/DistributedCluster/Cluster/DiscoveryShell.swift b/Sources/DistributedCluster/Cluster/DiscoveryShell.swift index 71c9aa1e8..b57687545 100644 --- a/Sources/DistributedCluster/Cluster/DiscoveryShell.swift +++ b/Sources/DistributedCluster/Cluster/DiscoveryShell.swift @@ -17,7 +17,7 @@ import ServiceDiscovery final class DiscoveryShell { enum Message: _NotActuallyCodableMessage { - case listing(Set) + case listing(Set) case stop(CompletionReason?) } @@ -25,7 +25,7 @@ final class DiscoveryShell { internal let cluster: ClusterShell.Ref private var subscription: CancellationToken? - internal var previouslyDiscoveredNodes: Set = [] + internal var previouslyDiscoveredNodes: Set = [] init(settings: ServiceDiscoverySettings, cluster: ClusterShell.Ref) { self.settings = settings @@ -54,8 +54,8 @@ final class DiscoveryShell { private var ready: _Behavior { _Behavior.receive { context, message in switch message { - case .listing(let discoveredNodes): - self.onUpdatedListing(discoveredNodes: discoveredNodes, context: context) + case .listing(let discoveredEndpoints): + self.onUpdatedListing(discoveredEndpoints: discoveredEndpoints, context: context) return .same case .stop(let reason): @@ -66,20 +66,20 @@ final class DiscoveryShell { } } - private func onUpdatedListing(discoveredNodes: Set, context: _ActorContext) { + private func onUpdatedListing(discoveredEndpoints: Set, context: _ActorContext) { context.log.trace("Service discovery updated listing", metadata: [ - "listing": Logger.MetadataValue.array(Array(discoveredNodes.map { + "listing": Logger.MetadataValue.array(Array(discoveredEndpoints.map { "\($0)" })), ]) - for newNode in discoveredNodes.subtracting(self.previouslyDiscoveredNodes) { + for newNode in discoveredEndpoints.subtracting(self.previouslyDiscoveredNodes) { context.log.trace("Discovered new node, initiating join", metadata: [ "node": "\(newNode)", "discovery/implementation": "\(self.settings.implementation)", ]) self.cluster.tell(.command(.handshakeWith(newNode))) } - self.previouslyDiscoveredNodes = discoveredNodes + self.previouslyDiscoveredNodes = discoveredEndpoints } func stop(reason: CompletionReason?, context: _ActorContext) -> _Behavior { diff --git a/Sources/DistributedCluster/Cluster/DistributedNodeDeathWatcher.swift b/Sources/DistributedCluster/Cluster/DistributedNodeDeathWatcher.swift index 2e08a0029..38e702030 100644 --- a/Sources/DistributedCluster/Cluster/DistributedNodeDeathWatcher.swift +++ b/Sources/DistributedCluster/Cluster/DistributedNodeDeathWatcher.swift @@ -15,7 +15,7 @@ import Distributed import Logging -/// Implements ``LifecycleWatch`` semantics in presence of ``Node`` failures. +/// Implements ``LifecycleWatch`` semantics in presence of ``Cluster/Endpoint`` failures. /// /// Depends on a failure detector (e.g. SWIM) to actually detect a node failure, however once detected, /// it handles notifying all _local_ actors which have watched at least one actor the terminating node. 
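// Small sketch (local stand-in type) of the set-difference step DiscoveryShell.onUpdatedListing
// performs above: only endpoints absent from the previous listing trigger a handshake; the real
// shell then sends .command(.handshakeWith(endpoint)) for each of them.
import DistributedCluster

struct DiscoveryDiff {
    private var previouslyDiscovered: Set<Cluster.Endpoint> = []

    mutating func newEndpoints(in listing: Set<Cluster.Endpoint>) -> Set<Cluster.Endpoint> {
        let fresh = listing.subtracting(previouslyDiscovered) // join only what is new
        previouslyDiscovered = listing                        // remember the full listing
        return fresh
    }
}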
@@ -35,22 +35,22 @@ internal actor DistributedNodeDeathWatcher { private let log: Logger - private let selfNode: UniqueNode + private let selfNode: Cluster.Node private var membership: Cluster.Membership = .empty /// Members which have been `removed` // TODO: clear after a few days, or some max count of nodes, use sorted set for this - private var nodeTombstones: Set = [] + private var nodeTombstones: Set = [] /// Mapping between remote node, and actors which have watched some actors on given remote node. - private var remoteWatchCallbacks: [UniqueNode: Set] = [:] + private var remoteWatchCallbacks: [Cluster.Node: Set] = [:] private var eventListenerTask: Task? init(actorSystem: ActorSystem) async { let log = actorSystem.log self.log = log - self.selfNode = actorSystem.cluster.uniqueNode + self.selfNode = actorSystem.cluster.node // initialized let events = actorSystem.cluster.events @@ -74,9 +74,9 @@ internal actor DistributedNodeDeathWatcher { } func watchActor( - on remoteNode: UniqueNode, + on remoteNode: Cluster.Node, by watcher: ClusterSystem.ActorID, - whenTerminated nodeTerminatedFn: @escaping @Sendable (UniqueNode) async -> Void + whenTerminated nodeTerminatedFn: @escaping @Sendable (Cluster.Node) async -> Void ) { guard !self.nodeTombstones.contains(remoteNode) else { // the system the watcher is attempting to watch has terminated before the watch has been processed, @@ -101,7 +101,7 @@ internal actor DistributedNodeDeathWatcher { } } - func cleanupTombstone(node: UniqueNode) { + func cleanupTombstone(node: Cluster.Node) { _ = self.nodeTombstones.remove(node) } @@ -144,7 +144,7 @@ extension DistributedNodeDeathWatcher { struct WatcherAndCallback: Hashable { /// Address of the local watcher which had issued this watch let watcherID: ClusterSystem.ActorID - let callback: @Sendable (UniqueNode) async -> Void + let callback: @Sendable (Cluster.Node) async -> Void func hash(into hasher: inout Hasher) { hasher.combine(self.watcherID) diff --git a/Sources/DistributedCluster/Cluster/Downing/DowningSettings.swift b/Sources/DistributedCluster/Cluster/Downing/DowningSettings.swift index 64d575103..74acae7bf 100644 --- a/Sources/DistributedCluster/Cluster/Downing/DowningSettings.swift +++ b/Sources/DistributedCluster/Cluster/Downing/DowningSettings.swift @@ -34,7 +34,7 @@ public struct DowningStrategySettings { case .none: return nil case .timeout(let settings): - return TimeoutBasedDowningStrategy(settings, selfNode: clusterSystemSettings.uniqueBindNode) + return TimeoutBasedDowningStrategy(settings, selfNode: clusterSystemSettings.bindNode) } } diff --git a/Sources/DistributedCluster/Cluster/Downing/DowningStrategy.swift b/Sources/DistributedCluster/Cluster/Downing/DowningStrategy.swift index 7a786337b..dd5410500 100644 --- a/Sources/DistributedCluster/Cluster/Downing/DowningStrategy.swift +++ b/Sources/DistributedCluster/Cluster/Downing/DowningStrategy.swift @@ -146,7 +146,7 @@ internal distributed actor DowningStrategyShell { for member in members { self.log.info( "Decision to [.down] member [\(member)]!", metadata: self.metadata([ - "downing/node": "\(reflecting: member.uniqueNode)", + "downing/node": "\(reflecting: member.node)", ]) ) self.actorSystem.cluster.down(member: member) diff --git a/Sources/DistributedCluster/Cluster/Downing/TimeoutBasedDowningStrategy.swift b/Sources/DistributedCluster/Cluster/Downing/TimeoutBasedDowningStrategy.swift index 5e52ea66f..31ee45388 100644 --- a/Sources/DistributedCluster/Cluster/Downing/TimeoutBasedDowningStrategy.swift +++ 
b/Sources/DistributedCluster/Cluster/Downing/TimeoutBasedDowningStrategy.swift @@ -19,7 +19,7 @@ /// If a node becomes reachable again before the timeout expires, it will not be considered for downing anymore. public final class TimeoutBasedDowningStrategy: DowningStrategy { let settings: TimeoutBasedDowningStrategySettings - let selfNode: UniqueNode + let selfNode: Cluster.Node var membership: Cluster.Membership @@ -33,7 +33,7 @@ public final class TimeoutBasedDowningStrategy: DowningStrategy { // buffer for nodes that will be marked down, if this node becomes the leader var _markAsDown: Set - init(_ settings: TimeoutBasedDowningStrategySettings, selfNode: UniqueNode) { + init(_ settings: TimeoutBasedDowningStrategySettings, selfNode: Cluster.Node) { self.settings = settings self.selfNode = selfNode self._unreachable = [] diff --git a/Sources/DistributedCluster/Cluster/HandshakeStateMachine.swift b/Sources/DistributedCluster/Cluster/HandshakeStateMachine.swift index 3636f6bbf..0b1bd469d 100644 --- a/Sources/DistributedCluster/Cluster/HandshakeStateMachine.swift +++ b/Sources/DistributedCluster/Cluster/HandshakeStateMachine.swift @@ -72,8 +72,8 @@ internal struct HandshakeStateMachine { self.settings.protocolVersion } - let remoteNode: Node - let localNode: UniqueNode + let remoteEndpoint: Cluster.Endpoint + let localNode: Cluster.Node var handshakeReconnectBackoff: BackoffStrategy @@ -84,18 +84,18 @@ internal struct HandshakeStateMachine { var channel: Channel? init( - settings: ClusterSystemSettings, localNode: UniqueNode, connectTo remoteNode: Node + settings: ClusterSystemSettings, localNode: Cluster.Node, connectTo remoteEndpoint: Cluster.Endpoint ) { - precondition(localNode.node != remoteNode, "MUST NOT attempt connecting to own bind address. Address: \(remoteNode)") + precondition(localNode.endpoint != remoteEndpoint, "MUST NOT attempt connecting to own bind address. Address: \(remoteEndpoint)") self.settings = settings self.localNode = localNode - self.remoteNode = remoteNode + self.remoteEndpoint = remoteEndpoint self.handshakeReconnectBackoff = settings.handshakeReconnectBackoff // copy since we want to mutate it as the handshakes attempt retries } func makeOffer() -> Wire.HandshakeOffer { // TODO: maybe store also at what time we sent the handshake, so we can diagnose if we should reject replies for being late etc - Wire.HandshakeOffer(version: self.protocolVersion, originNode: self.localNode, targetNode: self.remoteNode) + Wire.HandshakeOffer(version: self.protocolVersion, originNode: self.localNode, targetEndpoint: self.remoteEndpoint) } mutating func onConnectionEstablished(channel: Channel) { @@ -105,7 +105,7 @@ internal struct HandshakeStateMachine { // TODO: call into an connection error? 
// TODO: the remote REJECTING must not trigger backoffs mutating func onHandshakeTimeout() -> RetryDirective { - self.onConnectionError(HandshakeConnectionError(node: self.remoteNode, message: "Handshake timed out")) // TODO: improve msgs + self.onConnectionError(HandshakeConnectionError(endpoint: self.remoteEndpoint, message: "Handshake timed out")) // TODO: improve msgs } mutating func onConnectionError(_: Error) -> RetryDirective { @@ -119,7 +119,7 @@ internal struct HandshakeStateMachine { var description: Swift.String { """ InitiatedState(\ - remoteNode: \(self.remoteNode), \ + remoteNode: \(self.remoteEndpoint), \ localNode: \(self.localNode), \ channel: \(optional: self.channel)\ ) @@ -128,7 +128,7 @@ internal struct HandshakeStateMachine { } struct HandshakeConnectionError: Error, Equatable { - let node: Node // TODO: allow carrying UniqueNode + let endpoint: Cluster.Endpoint // TODO: allow carrying Cluster.Node let message: String } @@ -151,7 +151,7 @@ internal struct HandshakeStateMachine { private let state: ReadOnlyClusterState let offer: Wire.HandshakeOffer - var boundAddress: UniqueNode { + var boundAddress: Cluster.Node { self.state.selfNode } @@ -165,7 +165,7 @@ internal struct HandshakeStateMachine { } func negotiate() -> HandshakeStateMachine.NegotiateDirective { - guard self.boundAddress.node == self.offer.targetNode else { + guard self.boundAddress.endpoint == self.offer.targetEndpoint else { let error = HandshakeError.targetHandshakeAddressMismatch(self.offer, selfNode: self.boundAddress) let rejectedState = RejectedState(fromReceived: self, remoteNode: self.offer.originNode, error: error) @@ -209,8 +209,8 @@ internal struct HandshakeStateMachine { /// This state is used to unlock creating a completed Association. internal struct CompletedState { let protocolVersion: ClusterSystem.Version - var remoteNode: UniqueNode - var localNode: UniqueNode + var remoteNode: Cluster.Node + var localNode: Cluster.Node // let whenCompleted: EventLoopPromise // let unique association ID? @@ -218,7 +218,7 @@ internal struct HandshakeStateMachine { // // Since the client is the one who initiates the handshake, once it receives an Accept containing the remote unique node // it may immediately transition to the completed state. - init(fromInitiated initiated: InitiatedState, remoteNode: UniqueNode) { + init(fromInitiated initiated: InitiatedState, remoteNode: Cluster.Node) { precondition(initiated.localNode != remoteNode, "Node [\(initiated.localNode)] attempted to create association with itself.") self.protocolVersion = initiated.protocolVersion self.remoteNode = remoteNode @@ -227,7 +227,7 @@ internal struct HandshakeStateMachine { } // State Transition used by Server Side on accepting a received Handshake. 
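// Hypothetical condensation of the negotiate() check above: an incoming offer is only
// acceptable if it was addressed at this node's own endpoint; otherwise the handshake is
// rejected, mirroring HandshakeError.targetHandshakeAddressMismatch. Local stand-in result type.
import DistributedCluster

enum OfferDecision {
    case accept
    case reject(reason: String)
}

func negotiate(offerTarget: Cluster.Endpoint, boundNode: Cluster.Node) -> OfferDecision {
    guard boundNode.endpoint == offerTarget else {
        return .reject(reason: "offer was addressed to \(offerTarget), but this node is bound to \(boundNode.endpoint)")
    }
    return .accept
}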
- init(fromReceived received: HandshakeOfferReceivedState, remoteNode: UniqueNode) { + init(fromReceived received: HandshakeOfferReceivedState, remoteNode: Cluster.Node) { precondition(received.boundAddress != remoteNode, "Node [\(received.boundAddress)] attempted to create association with itself.") self.protocolVersion = received.protocolVersion self.remoteNode = remoteNode @@ -247,11 +247,11 @@ internal struct HandshakeStateMachine { internal struct RejectedState { let protocolVersion: ClusterSystem.Version - let localNode: UniqueNode - let remoteNode: UniqueNode + let localNode: Cluster.Node + let remoteNode: Cluster.Node let error: HandshakeError - init(fromReceived state: HandshakeOfferReceivedState, remoteNode: UniqueNode, error: HandshakeError) { + init(fromReceived state: HandshakeOfferReceivedState, remoteNode: Cluster.Node, error: HandshakeError) { self.protocolVersion = state.protocolVersion self.localNode = state.boundAddress self.remoteNode = remoteNode @@ -300,12 +300,12 @@ enum HandshakeError: Error { /// where the handshake was received at "the wrong node". /// /// The UID part of the `Node` does not matter for this check, but is included here for debugging purposes. - case targetHandshakeAddressMismatch(Wire.HandshakeOffer, selfNode: UniqueNode) + case targetHandshakeAddressMismatch(Wire.HandshakeOffer, selfNode: Cluster.Node) /// Returned when an incoming handshake protocol version does not match what this node can understand. case incompatibleProtocolVersion(local: ClusterSystem.Version, remote: ClusterSystem.Version) - case targetRejectedHandshake(selfNode: UniqueNode, remoteNode: UniqueNode, message: String) + case targetRejectedHandshake(selfNode: Cluster.Node, remoteNode: Cluster.Node, message: String) - case targetAlreadyTombstone(selfNode: UniqueNode, remoteNode: UniqueNode) + case targetAlreadyTombstone(selfNode: Cluster.Node, remoteNode: Cluster.Node) } diff --git a/Sources/DistributedCluster/Cluster/Leadership.swift b/Sources/DistributedCluster/Cluster/Leadership.swift index bc34978e2..12db930ec 100644 --- a/Sources/DistributedCluster/Cluster/Leadership.swift +++ b/Sources/DistributedCluster/Cluster/Leadership.swift @@ -115,7 +115,7 @@ extension Leadership { context.system.cluster.events.subscribe(context.myself) // FIXME: we have to add "own node" since we're not getting the .snapshot... so we have to manually act as if.. 
- _ = self.membership.applyMembershipChange(Cluster.MembershipChange(node: context.system.cluster.uniqueNode, previousStatus: nil, toStatus: .joining)) + _ = self.membership.applyMembershipChange(Cluster.MembershipChange(node: context.system.cluster.node, previousStatus: nil, toStatus: .joining)) return self.runElection(context) } } diff --git a/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossip+Serialization.swift b/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossip+Serialization.swift index f6cee22e3..820f67ffe 100644 --- a/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossip+Serialization.swift +++ b/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossip+Serialization.swift @@ -17,7 +17,7 @@ extension Cluster.MembershipGossip: _ProtobufRepresentable { public func toProto(context: Serialization.Context) throws -> ProtobufRepresentation { var proto = _ProtoClusterMembershipGossip() - proto.ownerUniqueNodeID = self.owner.nid.value + proto.ownerClusterNodeID = self.owner.nid.value proto.membership = try self.membership.toProto(context: context) // we manually ensure we encode using node identifiers, rather than full unique nodes to save space: @@ -25,7 +25,7 @@ extension Cluster.MembershipGossip: _ProtobufRepresentable { protoSeenTable.rows.reserveCapacity(self.seen.underlying.count) for (node, seenVersion) in self.seen.underlying { var row = _ProtoClusterMembershipSeenTableRow() - row.uniqueNodeID = node.nid.value + row.nodeID = node.nid.value row.version = try seenVersion.toCompactReplicaNodeIDProto(context: context) protoSeenTable.rows.append(row) } @@ -35,8 +35,8 @@ extension Cluster.MembershipGossip: _ProtobufRepresentable { } public init(fromProto proto: ProtobufRepresentation, context: Serialization.Context) throws { - guard proto.ownerUniqueNodeID != 0 else { - throw SerializationError(.missingField("ownerUniqueNodeID", type: "\(reflecting: Cluster.MembershipGossip.self)")) + guard proto.ownerClusterNodeID != 0 else { + throw SerializationError(.missingField("ownerNodeID", type: "\(reflecting: Cluster.MembershipGossip.self)")) } guard proto.hasMembership else { throw SerializationError(.missingField("membership", type: "\(reflecting: Cluster.MembershipGossip.self)")) @@ -47,16 +47,16 @@ extension Cluster.MembershipGossip: _ProtobufRepresentable { let membership = try Cluster.Membership(fromProto: proto.membership, context: context) - let ownerID = UniqueNodeID(proto.ownerUniqueNodeID) - guard let ownerNode = membership.member(byUniqueNodeID: ownerID)?.uniqueNode else { - throw SerializationError(.unableToDeserialize(hint: "Missing member for ownerUniqueNodeID, members: \(membership)")) + let ownerID = Cluster.Node.ID(proto.ownerClusterNodeID) + guard let ownerNode = membership.member(byUniqueNodeID: ownerID)?.node else { + throw SerializationError(.unableToDeserialize(hint: "Missing member for ownerNodeID, members: \(membership)")) } var gossip = Cluster.MembershipGossip(ownerNode: ownerNode) gossip.membership = membership gossip.seen.underlying.reserveCapacity(proto.seenTable.rows.count) for row in proto.seenTable.rows { - let nodeID: UniqueNodeID = .init(row.uniqueNodeID) + let nodeID: Cluster.Node.ID = .init(row.nodeID) guard let member = membership.member(byUniqueNodeID: nodeID) else { throw SerializationError(.unableToDeserialize(hint: "Missing Member for unique node id: \(nodeID), members: \(membership)")) } @@ -68,13 +68,13 @@ extension Cluster.MembershipGossip: 
_ProtobufRepresentable { let replicaID: ReplicaID switch protoReplicaVersion.replicaID.value { - case .some(.uniqueNodeID(let id)): + case .some(.nodeID(let id)): guard let member = membership.member(byUniqueNodeID: .init(id)) else { continue } - replicaID = .uniqueNode(member.uniqueNode) - case .some(.uniqueNode(let protoUniqueNode)): - replicaID = try .uniqueNode(.init(fromProto: protoUniqueNode, context: context)) + replicaID = .node(member.node) + case .some(.node(let protoUniqueNode)): + replicaID = try .node(.init(fromProto: protoUniqueNode, context: context)) case .some(.actorID(let address)): context.log.warning("Unexpected .actorID key in replicaVersion of Cluster.MembershipGossip, which is expected to only use unique node ids as replica versions; was: \(address)") continue @@ -85,7 +85,7 @@ extension Cluster.MembershipGossip: _ProtobufRepresentable { replicaVersions.append(VersionVector.ReplicaVersion(replicaID: replicaID, version: v)) } - gossip.seen.underlying[member.uniqueNode] = VersionVector(replicaVersions) + gossip.seen.underlying[member.node] = VersionVector(replicaVersions) } self = gossip diff --git a/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossip.swift b/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossip.swift index 44142bc0c..5b840fac3 100644 --- a/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossip.swift +++ b/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossip.swift @@ -20,7 +20,7 @@ extension Cluster { /// /// Used to guarantee phrases like "all nodes have seen a node A in status S", upon which the Leader may act. struct MembershipGossip: Codable, Equatable { - let owner: UniqueNode + let owner: Cluster.Node /// A table maintaining our perception of other nodes views on the version of membership. /// Each row in the table represents what versionVector we know the given node has observed recently. /// It may have in the mean time of course observed a new version already. @@ -36,9 +36,9 @@ extension Cluster { /// IMPORTANT: Whenever the membership is updated with an effective change, we MUST move the version forward (!) 
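// A deliberately tiny model (plain dictionaries rather than the library's VersionVector)
// of what the seen table above tracks: one version vector per Cluster.Node, where merging
// takes the pointwise maximum and convergence means every tracked row has observed at least
// some required version.
import DistributedCluster

typealias MiniVersionVector = [Cluster.Node: UInt64]

func merged(_ lhs: MiniVersionVector, _ rhs: MiniVersionVector) -> MiniVersionVector {
    lhs.merging(rhs) { max($0, $1) }
}

func dominatesOrEqual(_ vector: MiniVersionVector, _ required: MiniVersionVector) -> Bool {
    required.allSatisfy { entry in (vector[entry.key] ?? 0) >= entry.value }
}

func converged(seenTable: [Cluster.Node: MiniVersionVector], required: MiniVersionVector) -> Bool {
    // Mirrors the "no member is lagging behind the required version" check in this file.
    seenTable.values.allSatisfy { dominatesOrEqual($0, required) }
}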
var membership: Cluster.Membership - init(ownerNode: UniqueNode) { + init(ownerNode: Cluster.Node) { self.owner = ownerNode - // self.seen = Cluster.Gossip.SeenTable(myselfNode: ownerNode, version: VersionVector((.uniqueNode(ownerNode), 1))) + // self.seen = Cluster.Gossip.SeenTable(myselfNode: ownerNode, version: VersionVector((.node(ownerNode), 1))) self.seen = Cluster.MembershipGossip.SeenTable(myselfNode: ownerNode, version: VersionVector()) // The actual payload @@ -83,7 +83,7 @@ extension Cluster { // 1.2) Protect from zombies: Any nodes that we know are dead or down, we should not accept any information from let incomingConcurrentDownMembers = incoming.membership.members(atLeast: .down) for pruneFromIncomingBeforeMerge in incomingConcurrentDownMembers - where self.membership.uniqueMember(pruneFromIncomingBeforeMerge.uniqueNode) == nil + where self.membership.uniqueMember(pruneFromIncomingBeforeMerge.node) == nil { _ = incoming.pruneMember(pruneFromIncomingBeforeMerge) } @@ -105,9 +105,9 @@ extension Cluster { // 3) if any removals happened, we need to prune the removed nodes from the seen table for change in changes - where change.status.isRemoved && change.member.uniqueNode != self.owner + where change.status.isRemoved && change.member.node != self.owner { - self.seen.prune(change.member.uniqueNode) + self.seen.prune(change.member.node) } return .init(causalRelation: causalRelation, effectiveChanges: changes) @@ -115,8 +115,8 @@ extension Cluster { /// Remove member from `membership` and prune the seen tables of any trace of the removed node. mutating func pruneMember(_ member: Member) -> Cluster.MembershipChange? { - self.seen.prune(member.uniqueNode) // always prune is okey - let change = self.membership.removeCompletely(member.uniqueNode) + self.seen.prune(member.node) // always prune is okey + let change = self.membership.removeCompletely(member.node) return change } @@ -144,7 +144,7 @@ extension Cluster { } let laggingBehindMemberFound = members.contains { member in - if let memberSeenVersion = self.seen.version(at: member.uniqueNode) { + if let memberSeenVersion = self.seen.version(at: member.node) { switch memberSeenVersion.compareTo(requiredVersion) { case .happenedBefore, .concurrent: return true // found an offending member, it is lagging behind, thus no convergence @@ -183,17 +183,17 @@ extension Cluster.MembershipGossip { /// - node C (we think): has never seen any gossip from either A or B, realistically though it likely has, /// however it has not yet sent a gossip to "us" such that we could have gotten its updated version vector. struct SeenTable: Equatable { - var underlying: [UniqueNode: VersionVector] + var underlying: [Cluster.Node: VersionVector] init() { self.underlying = [:] } - init(myselfNode: UniqueNode, version: VersionVector) { + init(myselfNode: Cluster.Node, version: VersionVector) { self.underlying = [myselfNode: version] } - var nodes: Dictionary.Keys { + var nodes: Dictionary.Keys { self.underlying.keys } @@ -202,7 +202,7 @@ extension Cluster.MembershipGossip { /// - Returns: The `node`'s version's relationship to the latest version. /// E.g. `.happenedBefore` if the latest version is known to be more "recent" than the node's observed version. /// - SeeAlso: The definition of `VersionVector.CausalRelation` for detailed discussion of all possible relations. 
- func compareVersion(observedOn owner: UniqueNode, to incomingVersion: VersionVector) -> VersionVector.CausalRelation { + func compareVersion(observedOn owner: Cluster.Node, to incomingVersion: VersionVector) -> VersionVector.CausalRelation { /// We know that the node has seen _at least_ the membership at `nodeVersion`. (self.underlying[owner] ?? VersionVector()).compareTo(incomingVersion) } @@ -212,7 +212,7 @@ extension Cluster.MembershipGossip { /// /// In other words, we gained information and our membership has "moved forward". /// - mutating func merge(selfOwner: UniqueNode, incoming: SeenTable) { + mutating func merge(selfOwner: Cluster.Node, incoming: SeenTable) { var ownerVersion = self.version(at: selfOwner) ?? VersionVector() for incomingNode in incoming.nodes { @@ -226,7 +226,7 @@ extension Cluster.MembershipGossip { self.underlying[selfOwner] = ownerVersion } - // TODO: func haveNotYetSeen(version: VersionVector): [UniqueNode] + // TODO: func haveNotYetSeen(version: VersionVector): [Cluster.Node] /// Increments a specific ReplicaVersion, in the view owned by the `owner`. /// @@ -240,14 +240,14 @@ extension Cluster.MembershipGossip { /// To obtain `A | A:2, B:10`, after which we know that our view is "ahead or concurrent" because of the difference /// in the A field, meaning we need to gossip with B to converge those two version vectors. @discardableResult - mutating func incrementVersion(owner: UniqueNode, at node: UniqueNode) -> VersionVector { + mutating func incrementVersion(owner: Cluster.Node, at node: Cluster.Node) -> VersionVector { if var version = self.underlying[owner] { - version.increment(at: .uniqueNode(node)) + version.increment(at: .node(node)) self.underlying[owner] = version return version } else { // we treat incrementing from "nothing" as creating a new entry - let version = VersionVector((.uniqueNode(node), 1)) + let version = VersionVector((.node(node), 1)) self.underlying[owner] = version return version } @@ -256,7 +256,7 @@ extension Cluster.MembershipGossip { /// View a version vector at a specific node. /// This "view" represents "our" latest information about what we know that node has observed. /// This information may (and most likely is) outdated as the nodes continue to gossip to one another. - func version(at node: UniqueNode) -> VersionVector? { + func version(at node: Cluster.Node) -> VersionVector? { self.underlying[node] } @@ -267,9 +267,9 @@ extension Cluster.MembershipGossip { /// Performing this operation should be done with great care, as it means that if "the same exact node" were /// to "come back" it would be indistinguishable from being a new node. Measures to avoid this from happening /// must be taken on the cluster layer, by using and checking for tombstones. 
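// Minimal illustration (same plain-dictionary model as above, not the real SeenTable) of the
// prune(_:) operation shown just below: drop the removed node's own row, and drop its component
// from every remaining row so no trace of the removed Cluster.Node lingers.
import DistributedCluster

func prune(_ nodeToPrune: Cluster.Node, from table: inout [Cluster.Node: [Cluster.Node: UInt64]]) {
    table.removeValue(forKey: nodeToPrune)               // the removed node's own row
    for owner in Array(table.keys) {
        table[owner]?.removeValue(forKey: nodeToPrune)   // its entry in everyone else's row
    }
}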
// TODO: make a nasty test for this, a simple one we got; See MembershipGossipSeenTableTests - mutating func prune(_ nodeToPrune: UniqueNode) { + mutating func prune(_ nodeToPrune: Cluster.Node) { _ = self.underlying.removeValue(forKey: nodeToPrune) - let replicaToPrune: ReplicaID = .uniqueNode(nodeToPrune) + let replicaToPrune: ReplicaID = .node(nodeToPrune) for (key, version) in self.underlying where version.contains(replicaToPrune, 0) { self.underlying[key] = version.pruneReplica(replicaToPrune) diff --git a/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossipLogic.swift b/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossipLogic.swift index 60b7fa229..7246a7d12 100644 --- a/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossipLogic.swift +++ b/Sources/DistributedCluster/Cluster/MembershipGossip/Cluster+MembershipGossipLogic.swift @@ -26,7 +26,7 @@ final class MembershipGossipLogic: GossipLogic, CustomStringConvertible { typealias Acknowledgement = Cluster.MembershipGossip private let context: Context - internal lazy var localNode: UniqueNode = self.context.system.cluster.uniqueNode + internal lazy var localNode: Cluster.Node = self.context.system.cluster.node internal var latestGossip: Cluster.MembershipGossip private let notifyOnGossipRef: _ActorRef @@ -47,7 +47,7 @@ final class MembershipGossipLogic: GossipLogic, CustomStringConvertible { init(_ context: Context, notifyOnGossipRef: _ActorRef) { self.context = context self.notifyOnGossipRef = notifyOnGossipRef - self.latestGossip = .init(ownerNode: context.system.cluster.uniqueNode) + self.latestGossip = .init(ownerNode: context.system.cluster.node) } // ==== ------------------------------------------------------------------------------------------------------------ diff --git a/Sources/DistributedCluster/Cluster/NodeDeathWatcher.swift b/Sources/DistributedCluster/Cluster/NodeDeathWatcher.swift index 9e4aea05b..316c1bb60 100644 --- a/Sources/DistributedCluster/Cluster/NodeDeathWatcher.swift +++ b/Sources/DistributedCluster/Cluster/NodeDeathWatcher.swift @@ -32,17 +32,17 @@ import NIO /// /// Allows manually mocking membership changes to trigger terminated notifications. internal final class NodeDeathWatcherInstance: NodeDeathWatcher { - private let selfNode: UniqueNode + private let selfNode: Cluster.Node private var membership: Cluster.Membership /// Members which have been `removed` // TODO: clear after a few days, or some max count of nodes, use sorted set for this - private var nodeTombstones: Set = [] + private var nodeTombstones: Set = [] struct WatcherAndCallback: Hashable { /// Address of the local watcher which had issued this watch let watcherID: ClusterSystem.ActorID - let callback: @Sendable (UniqueNode) async -> Void + let callback: @Sendable (Cluster.Node) async -> Void func hash(into hasher: inout Hasher) { hasher.combine(self.watcherID) @@ -54,16 +54,16 @@ internal final class NodeDeathWatcherInstance: NodeDeathWatcher { } /// Mapping between remote node, and actors which have watched some actors on given remote node. 
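// The user-visible effect of the rename at call sites like the gossip logic above:
// the unique node of a running system is now spelled `cluster.node` (of type
// Cluster.Node) where it used to be `cluster.uniqueNode` (of type UniqueNode).
// A rough sketch only, assuming the post-rename DistributedCluster module with its
// default settings; error handling and system lifecycle are elided.
import DistributedCluster

func printSelfNode() async {
    let system = await ClusterSystem("Example")
    let selfNode: Cluster.Node = system.cluster.node   // was: system.cluster.uniqueNode
    print("running on:", selfNode)
}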
- private var remoteWatchers: [UniqueNode: Set<_AddressableActorRef>] = [:] - private var remoteWatchCallbacks: [UniqueNode: Set] = [:] + private var remoteWatchers: [Cluster.Node: Set<_AddressableActorRef>] = [:] + private var remoteWatchCallbacks: [Cluster.Node: Set] = [:] - init(selfNode: UniqueNode) { + init(selfNode: Cluster.Node) { self.selfNode = selfNode self.membership = .empty } @available(*, deprecated, message: "will be replaced by distributed actor / closure version") - func onActorWatched(by watcher: _AddressableActorRef, remoteNode: UniqueNode) { + func onActorWatched(by watcher: _AddressableActorRef, remoteNode: Cluster.Node) { guard !self.nodeTombstones.contains(remoteNode) else { // the system the watcher is attempting to watch has terminated before the watch has been processed, // thus we have to immediately reply with a termination system message, as otherwise it would never receive one @@ -86,9 +86,9 @@ internal final class NodeDeathWatcherInstance: NodeDeathWatcher { } func onActorWatched( - on remoteNode: UniqueNode, + on remoteNode: Cluster.Node, by watcher: ClusterSystem.ActorID, - whenTerminated nodeTerminatedFn: @escaping @Sendable (UniqueNode) async -> Void + whenTerminated nodeTerminatedFn: @escaping @Sendable (Cluster.Node) async -> Void ) { guard !self.nodeTombstones.contains(remoteNode) else { // the system the watcher is attempting to watch has terminated before the watch has been processed, @@ -158,7 +158,7 @@ internal protocol NodeDeathWatcher { /// Called when the `watcher` watches a remote actor which resides on the `remoteNode`. /// A failure detector may have to start monitoring this node using some internal mechanism, /// in order to be able to signal the watcher in case the node terminates (e.g. the node crashes). - func onActorWatched(by watcher: _AddressableActorRef, remoteNode: UniqueNode) + func onActorWatched(by watcher: _AddressableActorRef, remoteNode: Cluster.Node) /// Called when the cluster membership changes. /// @@ -181,8 +181,8 @@ enum NodeDeathWatcherShell { /// By default, the `FailureDetectorShell` handles these messages by interpreting them with an underlying `FailureDetector`, /// it would be possible however to allow implementing the raw protocol by user actors if we ever see the need for it. internal enum Message: _NotActuallyCodableMessage { - case remoteActorWatched(watcher: _AddressableActorRef, remoteNode: UniqueNode) - case remoteDistributedActorWatched(remoteNode: UniqueNode, watcherID: ClusterSystem.ActorID, nodeTerminated: @Sendable (UniqueNode) async -> Void) + case remoteActorWatched(watcher: _AddressableActorRef, remoteNode: Cluster.Node) + case remoteDistributedActorWatched(remoteNode: Cluster.Node, watcherID: ClusterSystem.ActorID, nodeTerminated: @Sendable (Cluster.Node) async -> Void) case removeWatcher(watcherID: ClusterSystem.ActorID) case membershipSnapshot(Cluster.Membership) case membershipChange(Cluster.MembershipChange) @@ -191,7 +191,7 @@ enum NodeDeathWatcherShell { // FIXME: death watcher is incomplete, should handle snapshot!! 
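// Condensed sketch of the watch/terminate flow implemented above: callbacks are
// registered per remote node, a node already in the tombstone set fires its callback
// immediately, and a later termination fires (and clears) all registered callbacks.
// A plain String and a small actor stand in for Cluster.Node and the real watcher.
actor MiniNodeDeathWatcher {
    private var tombstones: Set<String> = []
    private var callbacks: [String: [@Sendable (String) async -> Void]] = [:]

    func watch(node: String, onTerminated callback: @escaping @Sendable (String) async -> Void) async {
        guard !tombstones.contains(node) else {
            // The node died before this watch was processed; signal right away,
            // otherwise the watcher would never receive a termination.
            await callback(node)
            return
        }
        callbacks[node, default: []].append(callback)
    }

    func nodeTerminated(_ node: String) async {
        tombstones.insert(node)
        for callback in callbacks.removeValue(forKey: node) ?? [] {
            await callback(node)
        }
    }
}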
static func behavior(clusterEvents: ClusterEventStream) -> _Behavior { .setup { context in - let instance = NodeDeathWatcherInstance(selfNode: context.system.settings.uniqueBindNode) + let instance = NodeDeathWatcherInstance(selfNode: context.system.settings.bindNode) let onClusterEventRef = context.subReceive(Cluster.Event.self) { event in switch event { @@ -251,6 +251,6 @@ enum NodeDeathWatcherShell { // MARK: Errors enum NodeDeathWatcherError: Error { - case attemptedToFailUnknownAddress(Cluster.Membership, UniqueNode) - case watcherActorWasNotLocal(watcherID: ActorID, localNode: UniqueNode?) + case attemptedToFailUnknownAddress(Cluster.Membership, Cluster.Node) + case watcherActorWasNotLocal(watcherID: ActorID, localNode: Cluster.Node?) } diff --git a/Sources/DistributedCluster/Cluster/Protobuf/Cluster.pb.swift b/Sources/DistributedCluster/Cluster/Protobuf/Cluster.pb.swift index a08a634f7..e8be27ed0 100644 --- a/Sources/DistributedCluster/Cluster/Protobuf/Cluster.pb.swift +++ b/Sources/DistributedCluster/Cluster/Protobuf/Cluster.pb.swift @@ -1,5 +1,4 @@ // DO NOT EDIT. -// swift-format-ignore-file // // Generated by the Swift generator plugin for the protocol buffer compiler. // Source: Cluster/Cluster.proto @@ -27,7 +26,7 @@ import SwiftProtobuf // If the compiler emits an error on this type, it is because this file // was generated by a version of the `protoc` Swift plug-in that is // incompatible with the version of SwiftProtobuf to which you are linking. -// Please ensure that you are building against the same version of the API +// Please ensure that your are building against the same version of the API // that was used to generate this file. fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} @@ -39,23 +38,26 @@ public struct _ProtoClusterShellMessage { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - public var message: _ProtoClusterShellMessage.OneOf_Message? = nil + public var message: OneOf_Message? { + get {return _storage._message} + set {_uniqueStorage()._message = newValue} + } /// Not all messages are serializable, on purpose, as they are not intended to cross over the network public var clusterEvent: _ProtoClusterEvent { get { - if case .clusterEvent(let v)? = message {return v} + if case .clusterEvent(let v)? = _storage._message {return v} return _ProtoClusterEvent() } - set {message = .clusterEvent(newValue)} + set {_uniqueStorage()._message = .clusterEvent(newValue)} } public var inbound: _ProtoClusterInbound { get { - if case .inbound(let v)? = message {return v} + if case .inbound(let v)? = _storage._message {return v} return _ProtoClusterInbound() } - set {message = .inbound(newValue)} + set {_uniqueStorage()._message = .inbound(newValue)} } public var unknownFields = SwiftProtobuf.UnknownStorage() @@ -67,18 +69,9 @@ public struct _ProtoClusterShellMessage { #if !swift(>=4.1) public static func ==(lhs: _ProtoClusterShellMessage.OneOf_Message, rhs: _ProtoClusterShellMessage.OneOf_Message) -> Bool { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 switch (lhs, rhs) { - case (.clusterEvent, .clusterEvent): return { - guard case .clusterEvent(let l) = lhs, case .clusterEvent(let r) = rhs else { preconditionFailure() } - return l == r - }() - case (.inbound, .inbound): return { - guard case .inbound(let l) = lhs, case .inbound(let r) = rhs else { preconditionFailure() } - return l == r - }() + case (.clusterEvent(let l), .clusterEvent(let r)): return l == r + case (.inbound(let l), .inbound(let r)): return l == r default: return false } } @@ -86,6 +79,8 @@ public struct _ProtoClusterShellMessage { } public init() {} + + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoClusterInbound { @@ -94,14 +89,17 @@ public struct _ProtoClusterInbound { // methods supported on all messages. /// Not all messages are serializable, on purpose, as they are not intended to cross over the network - public var message: _ProtoClusterInbound.OneOf_Message? = nil + public var message: OneOf_Message? { + get {return _storage._message} + set {_uniqueStorage()._message = newValue} + } public var restInPeace: _ProtoClusterRestInPeace { get { - if case .restInPeace(let v)? = message {return v} + if case .restInPeace(let v)? = _storage._message {return v} return _ProtoClusterRestInPeace() } - set {message = .restInPeace(newValue)} + set {_uniqueStorage()._message = .restInPeace(newValue)} } public var unknownFields = SwiftProtobuf.UnknownStorage() @@ -112,20 +110,16 @@ public struct _ProtoClusterInbound { #if !swift(>=4.1) public static func ==(lhs: _ProtoClusterInbound.OneOf_Message, rhs: _ProtoClusterInbound.OneOf_Message) -> Bool { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch (lhs, rhs) { - case (.restInPeace, .restInPeace): return { - guard case .restInPeace(let l) = lhs, case .restInPeace(let r) = rhs else { preconditionFailure() } - return l == r - }() + case (.restInPeace(let l), .restInPeace(let r)): return l == r } } #endif } public init() {} + + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoClusterRestInPeace { @@ -133,30 +127,29 @@ public struct _ProtoClusterRestInPeace { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - public var targetNode: _ProtoUniqueNode { - get {return _targetNode ?? _ProtoUniqueNode()} - set {_targetNode = newValue} + public var targetNode: _ProtoClusterNode { + get {return _storage._targetNode ?? _ProtoClusterNode()} + set {_uniqueStorage()._targetNode = newValue} } /// Returns true if `targetNode` has been explicitly set. - public var hasTargetNode: Bool {return self._targetNode != nil} + public var hasTargetNode: Bool {return _storage._targetNode != nil} /// Clears the value of `targetNode`. Subsequent reads from it will return its default value. - public mutating func clearTargetNode() {self._targetNode = nil} + public mutating func clearTargetNode() {_uniqueStorage()._targetNode = nil} - public var fromNode: _ProtoUniqueNode { - get {return _fromNode ?? _ProtoUniqueNode()} - set {_fromNode = newValue} + public var fromNode: _ProtoClusterNode { + get {return _storage._fromNode ?? _ProtoClusterNode()} + set {_uniqueStorage()._fromNode = newValue} } /// Returns true if `fromNode` has been explicitly set. 
- public var hasFromNode: Bool {return self._fromNode != nil} + public var hasFromNode: Bool {return _storage._fromNode != nil} /// Clears the value of `fromNode`. Subsequent reads from it will return its default value. - public mutating func clearFromNode() {self._fromNode = nil} + public mutating func clearFromNode() {_uniqueStorage()._fromNode = nil} public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _targetNode: _ProtoUniqueNode? = nil - fileprivate var _fromNode: _ProtoUniqueNode? = nil + fileprivate var _storage = _StorageClass.defaultInstance } // MARK: - Code below here is support for the SwiftProtobuf runtime. @@ -168,63 +161,75 @@ extension _ProtoClusterShellMessage: SwiftProtobuf.Message, SwiftProtobuf._Messa 2: .same(proto: "inbound"), ] + fileprivate class _StorageClass { + var _message: _ProtoClusterShellMessage.OneOf_Message? + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _message = source._message + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { - var v: _ProtoClusterEvent? - var hadOneofValue = false - if let current = self.message { - hadOneofValue = true - if case .clusterEvent(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.message = .clusterEvent(v) - } - }() - case 2: try { - var v: _ProtoClusterInbound? - var hadOneofValue = false - if let current = self.message { - hadOneofValue = true - if case .inbound(let m) = current {v = m} + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: + var v: _ProtoClusterEvent? + if let current = _storage._message { + try decoder.handleConflictingOneOf() + if case .clusterEvent(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._message = .clusterEvent(v)} + case 2: + var v: _ProtoClusterInbound? + if let current = _storage._message { + try decoder.handleConflictingOneOf() + if case .inbound(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._message = .inbound(v)} + default: break } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.message = .inbound(v) - } - }() - default: break } } } public func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch self.message { - case .clusterEvent?: try { - guard case .clusterEvent(let v)? 
= self.message else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - }() - case .inbound?: try { - guard case .inbound(let v)? = self.message else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - }() - case nil: break + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + switch _storage._message { + case .clusterEvent(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + case .inbound(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + case nil: break + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoClusterShellMessage, rhs: _ProtoClusterShellMessage) -> Bool { - if lhs.message != rhs.message {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._message != rhs_storage._message {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -236,39 +241,63 @@ extension _ProtoClusterInbound: SwiftProtobuf.Message, SwiftProtobuf._MessageImp 1: .same(proto: "restInPeace"), ] + fileprivate class _StorageClass { + var _message: _ProtoClusterInbound.OneOf_Message? + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _message = source._message + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { - var v: _ProtoClusterRestInPeace? - var hadOneofValue = false - if let current = self.message { - hadOneofValue = true - if case .restInPeace(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.message = .restInPeace(v) + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: + var v: _ProtoClusterRestInPeace? + if let current = _storage._message { + try decoder.handleConflictingOneOf() + if case .restInPeace(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._message = .restInPeace(v)} + default: break } - }() - default: break } } } public func traverse(visitor: inout V) throws { - if case .restInPeace(let v)? = self.message { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if case .restInPeace(let v)? 
= _storage._message { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoClusterInbound, rhs: _ProtoClusterInbound) -> Bool { - if lhs.message != rhs.message {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._message != rhs_storage._message {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -281,32 +310,63 @@ extension _ProtoClusterRestInPeace: SwiftProtobuf.Message, SwiftProtobuf._Messag 2: .same(proto: "fromNode"), ] + fileprivate class _StorageClass { + var _targetNode: _ProtoClusterNode? = nil + var _fromNode: _ProtoClusterNode? = nil + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _targetNode = source._targetNode + _fromNode = source._fromNode + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._targetNode) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._fromNode) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._targetNode) + case 2: try decoder.decodeSingularMessageField(value: &_storage._fromNode) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._targetNode { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if let v = self._fromNode { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._targetNode { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if let v = _storage._fromNode { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoClusterRestInPeace, rhs: _ProtoClusterRestInPeace) -> Bool { - if lhs._targetNode != rhs._targetNode {return false} - if lhs._fromNode != rhs._fromNode {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._targetNode != rhs_storage._targetNode {return false} + if _storage._fromNode != rhs_storage._fromNode {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } diff --git a/Sources/DistributedCluster/Cluster/Protobuf/ClusterEvents.pb.swift 
b/Sources/DistributedCluster/Cluster/Protobuf/ClusterEvents.pb.swift index 52938cc86..f8ea726bb 100644 --- a/Sources/DistributedCluster/Cluster/Protobuf/ClusterEvents.pb.swift +++ b/Sources/DistributedCluster/Cluster/Protobuf/ClusterEvents.pb.swift @@ -1,5 +1,4 @@ // DO NOT EDIT. -// swift-format-ignore-file // // Generated by the Swift generator plugin for the protocol buffer compiler. // Source: Cluster/ClusterEvents.proto @@ -27,7 +26,7 @@ import SwiftProtobuf // If the compiler emits an error on this type, it is because this file // was generated by a version of the `protoc` Swift plug-in that is // incompatible with the version of SwiftProtobuf to which you are linking. -// Please ensure that you are building against the same version of the API +// Please ensure that your are building against the same version of the API // that was used to generate this file. fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} @@ -41,30 +40,33 @@ public struct _ProtoClusterEvent { /// other types of events are leadership and reachability changes, /// though those we do not need to send over the wire normally. - public var event: _ProtoClusterEvent.OneOf_Event? = nil + public var event: OneOf_Event? { + get {return _storage._event} + set {_uniqueStorage()._event = newValue} + } public var snapshot: _ProtoClusterMembership { get { - if case .snapshot(let v)? = event {return v} + if case .snapshot(let v)? = _storage._event {return v} return _ProtoClusterMembership() } - set {event = .snapshot(newValue)} + set {_uniqueStorage()._event = .snapshot(newValue)} } public var membershipChange: _ProtoClusterMembershipChange { get { - if case .membershipChange(let v)? = event {return v} + if case .membershipChange(let v)? = _storage._event {return v} return _ProtoClusterMembershipChange() } - set {event = .membershipChange(newValue)} + set {_uniqueStorage()._event = .membershipChange(newValue)} } public var leadershipChange: _ProtoClusterLeadershipChange { get { - if case .leadershipChange(let v)? = event {return v} + if case .leadershipChange(let v)? = _storage._event {return v} return _ProtoClusterLeadershipChange() } - set {event = .leadershipChange(newValue)} + set {_uniqueStorage()._event = .leadershipChange(newValue)} } public var unknownFields = SwiftProtobuf.UnknownStorage() @@ -78,22 +80,10 @@ public struct _ProtoClusterEvent { #if !swift(>=4.1) public static func ==(lhs: _ProtoClusterEvent.OneOf_Event, rhs: _ProtoClusterEvent.OneOf_Event) -> Bool { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 switch (lhs, rhs) { - case (.snapshot, .snapshot): return { - guard case .snapshot(let l) = lhs, case .snapshot(let r) = rhs else { preconditionFailure() } - return l == r - }() - case (.membershipChange, .membershipChange): return { - guard case .membershipChange(let l) = lhs, case .membershipChange(let r) = rhs else { preconditionFailure() } - return l == r - }() - case (.leadershipChange, .leadershipChange): return { - guard case .leadershipChange(let l) = lhs, case .leadershipChange(let r) = rhs else { preconditionFailure() } - return l == r - }() + case (.snapshot(let l), .snapshot(let r)): return l == r + case (.membershipChange(let l), .membershipChange(let r)): return l == r + case (.leadershipChange(let l), .leadershipChange(let r)): return l == r default: return false } } @@ -101,6 +91,8 @@ public struct _ProtoClusterEvent { } public init() {} + + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoClusterMembershipChange { @@ -108,24 +100,30 @@ public struct _ProtoClusterMembershipChange { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - public var node: _ProtoUniqueNode { - get {return _node ?? _ProtoUniqueNode()} - set {_node = newValue} + public var node: _ProtoClusterNode { + get {return _storage._node ?? _ProtoClusterNode()} + set {_uniqueStorage()._node = newValue} } /// Returns true if `node` has been explicitly set. - public var hasNode: Bool {return self._node != nil} + public var hasNode: Bool {return _storage._node != nil} /// Clears the value of `node`. Subsequent reads from it will return its default value. - public mutating func clearNode() {self._node = nil} + public mutating func clearNode() {_uniqueStorage()._node = nil} - public var fromStatus: _ProtoClusterMemberStatus = .unspecified + public var fromStatus: _ProtoClusterMemberStatus { + get {return _storage._fromStatus} + set {_uniqueStorage()._fromStatus = newValue} + } - public var toStatus: _ProtoClusterMemberStatus = .unspecified + public var toStatus: _ProtoClusterMemberStatus { + get {return _storage._toStatus} + set {_uniqueStorage()._toStatus = newValue} + } public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _node: _ProtoUniqueNode? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoClusterLeadershipChange { @@ -134,29 +132,28 @@ public struct _ProtoClusterLeadershipChange { // methods supported on all messages. public var oldLeader: _ProtoClusterMember { - get {return _oldLeader ?? _ProtoClusterMember()} - set {_oldLeader = newValue} + get {return _storage._oldLeader ?? _ProtoClusterMember()} + set {_uniqueStorage()._oldLeader = newValue} } /// Returns true if `oldLeader` has been explicitly set. - public var hasOldLeader: Bool {return self._oldLeader != nil} + public var hasOldLeader: Bool {return _storage._oldLeader != nil} /// Clears the value of `oldLeader`. Subsequent reads from it will return its default value. - public mutating func clearOldLeader() {self._oldLeader = nil} + public mutating func clearOldLeader() {_uniqueStorage()._oldLeader = nil} public var newLeader: _ProtoClusterMember { - get {return _newLeader ?? _ProtoClusterMember()} - set {_newLeader = newValue} + get {return _storage._newLeader ?? _ProtoClusterMember()} + set {_uniqueStorage()._newLeader = newValue} } /// Returns true if `newLeader` has been explicitly set. 
- public var hasNewLeader: Bool {return self._newLeader != nil} + public var hasNewLeader: Bool {return _storage._newLeader != nil} /// Clears the value of `newLeader`. Subsequent reads from it will return its default value. - public mutating func clearNewLeader() {self._newLeader = nil} + public mutating func clearNewLeader() {_uniqueStorage()._newLeader = nil} public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _oldLeader: _ProtoClusterMember? = nil - fileprivate var _newLeader: _ProtoClusterMember? = nil + fileprivate var _storage = _StorageClass.defaultInstance } // MARK: - Code below here is support for the SwiftProtobuf runtime. @@ -169,80 +166,85 @@ extension _ProtoClusterEvent: SwiftProtobuf.Message, SwiftProtobuf._MessageImple 3: .same(proto: "leadershipChange"), ] + fileprivate class _StorageClass { + var _event: _ProtoClusterEvent.OneOf_Event? + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _event = source._event + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { - var v: _ProtoClusterMembership? - var hadOneofValue = false - if let current = self.event { - hadOneofValue = true - if case .snapshot(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.event = .snapshot(v) + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: + var v: _ProtoClusterMembership? + if let current = _storage._event { + try decoder.handleConflictingOneOf() + if case .snapshot(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._event = .snapshot(v)} + case 2: + var v: _ProtoClusterMembershipChange? + if let current = _storage._event { + try decoder.handleConflictingOneOf() + if case .membershipChange(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._event = .membershipChange(v)} + case 3: + var v: _ProtoClusterLeadershipChange? + if let current = _storage._event { + try decoder.handleConflictingOneOf() + if case .leadershipChange(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._event = .leadershipChange(v)} + default: break } - }() - case 2: try { - var v: _ProtoClusterMembershipChange? - var hadOneofValue = false - if let current = self.event { - hadOneofValue = true - if case .membershipChange(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.event = .membershipChange(v) - } - }() - case 3: try { - var v: _ProtoClusterLeadershipChange? 
- var hadOneofValue = false - if let current = self.event { - hadOneofValue = true - if case .leadershipChange(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.event = .leadershipChange(v) - } - }() - default: break } } } public func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch self.event { - case .snapshot?: try { - guard case .snapshot(let v)? = self.event else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - }() - case .membershipChange?: try { - guard case .membershipChange(let v)? = self.event else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - }() - case .leadershipChange?: try { - guard case .leadershipChange(let v)? = self.event else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 3) - }() - case nil: break + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + switch _storage._event { + case .snapshot(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + case .membershipChange(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + case .leadershipChange(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + case nil: break + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoClusterEvent, rhs: _ProtoClusterEvent) -> Bool { - if lhs.event != rhs.event {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._event != rhs_storage._event {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -256,37 +258,70 @@ extension _ProtoClusterMembershipChange: SwiftProtobuf.Message, SwiftProtobuf._M 3: .same(proto: "toStatus"), ] + fileprivate class _StorageClass { + var _node: _ProtoClusterNode? = nil + var _fromStatus: _ProtoClusterMemberStatus = .unspecified + var _toStatus: _ProtoClusterMemberStatus = .unspecified + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _node = source._node + _fromStatus = source._fromStatus + _toStatus = source._toStatus + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._node) }() - case 2: try { try decoder.decodeSingularEnumField(value: &self.fromStatus) }() - case 3: try { try decoder.decodeSingularEnumField(value: &self.toStatus) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._node) + case 2: try decoder.decodeSingularEnumField(value: &_storage._fromStatus) + case 3: try decoder.decodeSingularEnumField(value: &_storage._toStatus) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._node { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if self.fromStatus != .unspecified { - try visitor.visitSingularEnumField(value: self.fromStatus, fieldNumber: 2) - } - if self.toStatus != .unspecified { - try visitor.visitSingularEnumField(value: self.toStatus, fieldNumber: 3) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._node { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if _storage._fromStatus != .unspecified { + try visitor.visitSingularEnumField(value: _storage._fromStatus, fieldNumber: 2) + } + if _storage._toStatus != .unspecified { + try visitor.visitSingularEnumField(value: _storage._toStatus, fieldNumber: 3) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoClusterMembershipChange, rhs: _ProtoClusterMembershipChange) -> Bool { - if lhs._node != rhs._node {return false} - if lhs.fromStatus != rhs.fromStatus {return false} - if lhs.toStatus != rhs.toStatus {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._node != rhs_storage._node {return false} + if _storage._fromStatus != rhs_storage._fromStatus {return false} + if _storage._toStatus != rhs_storage._toStatus {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -299,32 +334,63 @@ extension _ProtoClusterLeadershipChange: SwiftProtobuf.Message, SwiftProtobuf._M 2: .same(proto: "newLeader"), ] + fileprivate class _StorageClass { + var _oldLeader: _ProtoClusterMember? = nil + var _newLeader: _ProtoClusterMember? = nil + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _oldLeader = source._oldLeader + _newLeader = source._newLeader + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._oldLeader) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._newLeader) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._oldLeader) + case 2: try decoder.decodeSingularMessageField(value: &_storage._newLeader) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._oldLeader { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if let v = self._newLeader { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._oldLeader { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if let v = _storage._newLeader { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoClusterLeadershipChange, rhs: _ProtoClusterLeadershipChange) -> Bool { - if lhs._oldLeader != rhs._oldLeader {return false} - if lhs._newLeader != rhs._newLeader {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._oldLeader != rhs_storage._oldLeader {return false} + if _storage._newLeader != rhs_storage._newLeader {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } diff --git a/Sources/DistributedCluster/Cluster/Protobuf/Membership+Serialization.swift b/Sources/DistributedCluster/Cluster/Protobuf/Membership+Serialization.swift index f83788300..48ac0d72f 100644 --- a/Sources/DistributedCluster/Cluster/Protobuf/Membership+Serialization.swift +++ b/Sources/DistributedCluster/Cluster/Protobuf/Membership+Serialization.swift @@ -26,7 +26,7 @@ extension Cluster.Membership: _ProtobufRepresentable { try $0.toProto(context: context) } if let leader = self.leader { - proto.leaderNode = try leader.uniqueNode.toProto(context: context) + proto.leaderNode = try leader.node.toProto(context: context) } return proto } @@ -36,10 +36,10 @@ extension Cluster.Membership: _ProtobufRepresentable { self._members.reserveCapacity(proto.members.count) for protoMember in proto.members { let member = try Cluster.Member(fromProto: protoMember, context: context) - self._members[member.uniqueNode] = member + self._members[member.node] = member } if proto.hasLeaderNode { - self._leaderNode = try UniqueNode(fromProto: proto.leaderNode, context: context) + self._leaderNode = try Cluster.Node(fromProto: proto.leaderNode, context: context) } else { self._leaderNode = nil } @@ -51,7 +51,7 @@ extension Cluster.Member: _ProtobufRepresentable { public func toProto(context: Serialization.Context) throws -> ProtobufRepresentation { var proto = ProtobufRepresentation() - proto.node = try self.uniqueNode.toProto(context: context) + proto.node = try self.node.toProto(context: context) proto.status = try self.status.toProto(context: context) proto.reachability = try self.reachability.toProto(context: context) if let number = self._upNumber { @@ -64,7 
+64,7 @@ extension Cluster.Member: _ProtobufRepresentable { guard proto.hasNode else { throw SerializationError(.missingField("node", type: "\(ProtobufRepresentation.self)")) } - self.uniqueNode = try .init(fromProto: proto.node, context: context) + self.node = try .init(fromProto: proto.node, context: context) self.status = try .init(fromProto: proto.status, context: context) self.reachability = try .init(fromProto: proto.reachability, context: context) self._upNumber = proto.upNumber == 0 ? nil : Int(proto.upNumber) diff --git a/Sources/DistributedCluster/Cluster/Protobuf/Membership.pb.swift b/Sources/DistributedCluster/Cluster/Protobuf/Membership.pb.swift index 2729b182e..637e1355c 100644 --- a/Sources/DistributedCluster/Cluster/Protobuf/Membership.pb.swift +++ b/Sources/DistributedCluster/Cluster/Protobuf/Membership.pb.swift @@ -1,5 +1,4 @@ // DO NOT EDIT. -// swift-format-ignore-file // // Generated by the Swift generator plugin for the protocol buffer compiler. // Source: Cluster/Membership.proto @@ -27,7 +26,7 @@ import SwiftProtobuf // If the compiler emits an error on this type, it is because this file // was generated by a version of the `protoc` Swift plug-in that is // incompatible with the version of SwiftProtobuf to which you are linking. -// Please ensure that you are building against the same version of the API +// Please ensure that your are building against the same version of the API // that was used to generate this file. fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} @@ -139,22 +138,25 @@ public struct _ProtoClusterMembership { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - public var members: [_ProtoClusterMember] = [] + public var members: [_ProtoClusterMember] { + get {return _storage._members} + set {_uniqueStorage()._members = newValue} + } - public var leaderNode: _ProtoUniqueNode { - get {return _leaderNode ?? _ProtoUniqueNode()} - set {_leaderNode = newValue} + public var leaderNode: _ProtoClusterNode { + get {return _storage._leaderNode ?? _ProtoClusterNode()} + set {_uniqueStorage()._leaderNode = newValue} } /// Returns true if `leaderNode` has been explicitly set. - public var hasLeaderNode: Bool {return self._leaderNode != nil} + public var hasLeaderNode: Bool {return _storage._leaderNode != nil} /// Clears the value of `leaderNode`. Subsequent reads from it will return its default value. - public mutating func clearLeaderNode() {self._leaderNode = nil} + public mutating func clearLeaderNode() {_uniqueStorage()._leaderNode = nil} public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _leaderNode: _ProtoUniqueNode? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoClusterMember { @@ -162,26 +164,35 @@ public struct _ProtoClusterMember { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - public var node: _ProtoUniqueNode { - get {return _node ?? _ProtoUniqueNode()} - set {_node = newValue} + public var node: _ProtoClusterNode { + get {return _storage._node ?? _ProtoClusterNode()} + set {_uniqueStorage()._node = newValue} } /// Returns true if `node` has been explicitly set. - public var hasNode: Bool {return self._node != nil} + public var hasNode: Bool {return _storage._node != nil} /// Clears the value of `node`. 
Subsequent reads from it will return its default value. - public mutating func clearNode() {self._node = nil} + public mutating func clearNode() {_uniqueStorage()._node = nil} - public var status: _ProtoClusterMemberStatus = .unspecified + public var status: _ProtoClusterMemberStatus { + get {return _storage._status} + set {_uniqueStorage()._status = newValue} + } - public var reachability: _ProtoClusterMemberReachability = .unspecified + public var reachability: _ProtoClusterMemberReachability { + get {return _storage._reachability} + set {_uniqueStorage()._reachability = newValue} + } - public var upNumber: UInt32 = 0 + public var upNumber: UInt32 { + get {return _storage._upNumber} + set {_uniqueStorage()._upNumber = newValue} + } public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _node: _ProtoUniqueNode? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoClusterMembershipGossip { @@ -189,36 +200,38 @@ public struct _ProtoClusterMembershipGossip { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - /// Membership contains full UniqueNode renderings, and the owner and seen table refer to them by UniqueNode.ID + /// Membership contains full ClusterNode renderings, and the owner and seen table refer to them by ClusterNode.ID /// this saves us space (by avoiding to render the unique node explicitly many times for each member/seen-entry). public var membership: _ProtoClusterMembership { - get {return _membership ?? _ProtoClusterMembership()} - set {_membership = newValue} + get {return _storage._membership ?? _ProtoClusterMembership()} + set {_uniqueStorage()._membership = newValue} } /// Returns true if `membership` has been explicitly set. - public var hasMembership: Bool {return self._membership != nil} + public var hasMembership: Bool {return _storage._membership != nil} /// Clears the value of `membership`. Subsequent reads from it will return its default value. - public mutating func clearMembership() {self._membership = nil} + public mutating func clearMembership() {_uniqueStorage()._membership = nil} - /// The following fields will use compressed UniqueNode encoding and ONLY serialize them as their uniqueNodeID. - /// During deserialization the fields can be resolved against the membership to obtain full UniqueNode values if necessary. - public var ownerUniqueNodeID: UInt64 = 0 + /// The following fields will use compressed ClusterNode encoding and ONLY serialize them as their nodeID. + /// During deserialization the fields can be resolved against the membership to obtain full ClusterNode values if necessary. + public var ownerClusterNodeID: UInt64 { + get {return _storage._ownerClusterNodeID} + set {_uniqueStorage()._ownerClusterNodeID = newValue} + } public var seenTable: _ProtoClusterMembershipSeenTable { - get {return _seenTable ?? _ProtoClusterMembershipSeenTable()} - set {_seenTable = newValue} + get {return _storage._seenTable ?? _ProtoClusterMembershipSeenTable()} + set {_uniqueStorage()._seenTable = newValue} } /// Returns true if `seenTable` has been explicitly set. - public var hasSeenTable: Bool {return self._seenTable != nil} + public var hasSeenTable: Bool {return _storage._seenTable != nil} /// Clears the value of `seenTable`. Subsequent reads from it will return its default value. 
- public mutating func clearSeenTable() {self._seenTable = nil} + public mutating func clearSeenTable() {_uniqueStorage()._seenTable = nil} public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _membership: _ProtoClusterMembership? = nil - fileprivate var _seenTable: _ProtoClusterMembershipSeenTable? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoClusterMembershipSeenTable { @@ -238,22 +251,25 @@ public struct _ProtoClusterMembershipSeenTableRow { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - public var uniqueNodeID: UInt64 = 0 + public var nodeID: UInt64 { + get {return _storage._nodeID} + set {_uniqueStorage()._nodeID = newValue} + } public var version: _ProtoVersionVector { - get {return _version ?? _ProtoVersionVector()} - set {_version = newValue} + get {return _storage._version ?? _ProtoVersionVector()} + set {_uniqueStorage()._version = newValue} } /// Returns true if `version` has been explicitly set. - public var hasVersion: Bool {return self._version != nil} + public var hasVersion: Bool {return _storage._version != nil} /// Clears the value of `version`. Subsequent reads from it will return its default value. - public mutating func clearVersion() {self._version = nil} + public mutating func clearVersion() {_uniqueStorage()._version = nil} public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _version: _ProtoVersionVector? = nil + fileprivate var _storage = _StorageClass.defaultInstance } // MARK: - Code below here is support for the SwiftProtobuf runtime. @@ -284,32 +300,63 @@ extension _ProtoClusterMembership: SwiftProtobuf.Message, SwiftProtobuf._Message 2: .same(proto: "leaderNode"), ] + fileprivate class _StorageClass { + var _members: [_ProtoClusterMember] = [] + var _leaderNode: _ProtoClusterNode? = nil + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _members = source._members + _leaderNode = source._leaderNode + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeRepeatedMessageField(value: &self.members) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._leaderNode) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeRepeatedMessageField(value: &_storage._members) + case 2: try decoder.decodeSingularMessageField(value: &_storage._leaderNode) + default: break + } } } } public func traverse(visitor: inout V) throws { - if !self.members.isEmpty { - try visitor.visitRepeatedMessageField(value: self.members, fieldNumber: 1) - } - if let v = self._leaderNode { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if !_storage._members.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._members, fieldNumber: 1) + } + if let v = _storage._leaderNode { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoClusterMembership, rhs: _ProtoClusterMembership) -> Bool { - if lhs.members != rhs.members {return false} - if lhs._leaderNode != rhs._leaderNode {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._members != rhs_storage._members {return false} + if _storage._leaderNode != rhs_storage._leaderNode {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -324,42 +371,77 @@ extension _ProtoClusterMember: SwiftProtobuf.Message, SwiftProtobuf._MessageImpl 4: .same(proto: "upNumber"), ] + fileprivate class _StorageClass { + var _node: _ProtoClusterNode? = nil + var _status: _ProtoClusterMemberStatus = .unspecified + var _reachability: _ProtoClusterMemberReachability = .unspecified + var _upNumber: UInt32 = 0 + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _node = source._node + _status = source._status + _reachability = source._reachability + _upNumber = source._upNumber + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._node) }() - case 2: try { try decoder.decodeSingularEnumField(value: &self.status) }() - case 3: try { try decoder.decodeSingularEnumField(value: &self.reachability) }() - case 4: try { try decoder.decodeSingularUInt32Field(value: &self.upNumber) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._node) + case 2: try decoder.decodeSingularEnumField(value: &_storage._status) + case 3: try decoder.decodeSingularEnumField(value: &_storage._reachability) + case 4: try decoder.decodeSingularUInt32Field(value: &_storage._upNumber) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._node { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if self.status != .unspecified { - try visitor.visitSingularEnumField(value: self.status, fieldNumber: 2) - } - if self.reachability != .unspecified { - try visitor.visitSingularEnumField(value: self.reachability, fieldNumber: 3) - } - if self.upNumber != 0 { - try visitor.visitSingularUInt32Field(value: self.upNumber, fieldNumber: 4) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._node { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if _storage._status != .unspecified { + try visitor.visitSingularEnumField(value: _storage._status, fieldNumber: 2) + } + if _storage._reachability != .unspecified { + try visitor.visitSingularEnumField(value: _storage._reachability, fieldNumber: 3) + } + if _storage._upNumber != 0 { + try visitor.visitSingularUInt32Field(value: _storage._upNumber, fieldNumber: 4) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoClusterMember, rhs: _ProtoClusterMember) -> Bool { - if lhs._node != rhs._node {return false} - if lhs.status != rhs.status {return false} - if lhs.reachability != rhs.reachability {return false} - if lhs.upNumber != rhs.upNumber {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._node != rhs_storage._node {return false} + if _storage._status != rhs_storage._status {return false} + if _storage._reachability != rhs_storage._reachability {return false} + if _storage._upNumber != rhs_storage._upNumber {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -369,41 +451,74 @@ extension _ProtoClusterMembershipGossip: SwiftProtobuf.Message, SwiftProtobuf._M public static let protoMessageName: String = "ClusterMembershipGossip" public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "membership"), - 2: .same(proto: "ownerUniqueNodeID"), + 2: .same(proto: "ownerClusterNodeID"), 3: .same(proto: "seenTable"), ] + fileprivate class _StorageClass { + var _membership: _ProtoClusterMembership? = nil + var _ownerClusterNodeID: UInt64 = 0 + var _seenTable: _ProtoClusterMembershipSeenTable? 
= nil + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _membership = source._membership + _ownerClusterNodeID = source._ownerClusterNodeID + _seenTable = source._seenTable + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._membership) }() - case 2: try { try decoder.decodeSingularUInt64Field(value: &self.ownerUniqueNodeID) }() - case 3: try { try decoder.decodeSingularMessageField(value: &self._seenTable) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._membership) + case 2: try decoder.decodeSingularUInt64Field(value: &_storage._ownerClusterNodeID) + case 3: try decoder.decodeSingularMessageField(value: &_storage._seenTable) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._membership { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if self.ownerUniqueNodeID != 0 { - try visitor.visitSingularUInt64Field(value: self.ownerUniqueNodeID, fieldNumber: 2) - } - if let v = self._seenTable { - try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._membership { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if _storage._ownerClusterNodeID != 0 { + try visitor.visitSingularUInt64Field(value: _storage._ownerClusterNodeID, fieldNumber: 2) + } + if let v = _storage._seenTable { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoClusterMembershipGossip, rhs: _ProtoClusterMembershipGossip) -> Bool { - if lhs._membership != rhs._membership {return false} - if lhs.ownerUniqueNodeID != rhs.ownerUniqueNodeID {return false} - if lhs._seenTable != rhs._seenTable {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._membership != rhs_storage._membership {return false} + if _storage._ownerClusterNodeID != rhs_storage._ownerClusterNodeID {return false} + if _storage._seenTable != rhs_storage._seenTable {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -417,11 +532,8 @@ extension _ProtoClusterMembershipSeenTable: SwiftProtobuf.Message, SwiftProtobuf public mutating func decodeMessage(decoder: inout D) throws { while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no 
optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeRepeatedMessageField(value: &self.rows) }() + case 1: try decoder.decodeRepeatedMessageField(value: &self.rows) default: break } } @@ -444,36 +556,67 @@ extension _ProtoClusterMembershipSeenTable: SwiftProtobuf.Message, SwiftProtobuf extension _ProtoClusterMembershipSeenTableRow: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { public static let protoMessageName: String = "ClusterMembershipSeenTableRow" public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "uniqueNodeID"), + 1: .same(proto: "nodeID"), 2: .same(proto: "version"), ] + fileprivate class _StorageClass { + var _nodeID: UInt64 = 0 + var _version: _ProtoVersionVector? = nil + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _nodeID = source._nodeID + _version = source._version + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularUInt64Field(value: &self.uniqueNodeID) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._version) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularUInt64Field(value: &_storage._nodeID) + case 2: try decoder.decodeSingularMessageField(value: &_storage._version) + default: break + } } } } public func traverse(visitor: inout V) throws { - if self.uniqueNodeID != 0 { - try visitor.visitSingularUInt64Field(value: self.uniqueNodeID, fieldNumber: 1) - } - if let v = self._version { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if _storage._nodeID != 0 { + try visitor.visitSingularUInt64Field(value: _storage._nodeID, fieldNumber: 1) + } + if let v = _storage._version { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoClusterMembershipSeenTableRow, rhs: _ProtoClusterMembershipSeenTableRow) -> Bool { - if lhs.uniqueNodeID != rhs.uniqueNodeID {return false} - if lhs._version != rhs._version {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._nodeID != rhs_storage._nodeID {return false} + if _storage._version != rhs_storage._version {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } diff --git a/Sources/DistributedCluster/Cluster/Reception/OperationLogDistributedReceptionist.swift 
b/Sources/DistributedCluster/Cluster/Reception/OperationLogDistributedReceptionist.swift index 3ed38df4f..9d3b2dc2d 100644 --- a/Sources/DistributedCluster/Cluster/Reception/OperationLogDistributedReceptionist.swift +++ b/Sources/DistributedCluster/Cluster/Reception/OperationLogDistributedReceptionist.swift @@ -324,7 +324,7 @@ extension OpLogDistributedReceptionist: LifecycleWatch { "receptionist/guest": "\(guest.id)", ]) - guard id._isLocal || (id.uniqueNode == actorSystem.cluster.uniqueNode) else { + guard id._isLocal || (id.node == actorSystem.cluster.node) else { self.log.warning(""" Actor [\(guest.id)] attempted to checkIn under key [\(key)], with NOT-local receptionist! \ Actors MUST checkIn with their local receptionist in today's Receptionist implementation. @@ -669,7 +669,7 @@ extension OpLogDistributedReceptionist { return // this would mean we tried to pull from a "local" receptionist, bail out } - guard self.membership.contains(receptionistID.uniqueNode) else { + guard self.membership.contains(receptionistID.node) else { // node is either not known to us yet, OR has been downed and removed // avoid talking to it until we see it in membership. return @@ -786,7 +786,7 @@ extension OpLogDistributedReceptionist { if replayer == nil, until == 0 { self.log.debug("Received message from \(peer), but no replayer available, create one ad-hoc now", metadata: [ - "peer": "\(peer.id.uniqueNode)", + "peer": "\(peer.id.node)", ]) // TODO: Generally we should trigger a `onNewClusterMember` but seems we got a message before that triggered // Seems ordering became less strict here with DA unfortunately...? @@ -884,7 +884,7 @@ extension OpLogDistributedReceptionist { extension OpLogDistributedReceptionist { public func terminated(actor id: ID) { - if id == ActorID._receptionist(on: id.uniqueNode, for: .distributedActors) { + if id == ActorID._receptionist(on: id.node, for: .distributedActors) { self.log.debug("Watched receptionist terminated: \(id)") self.receptionistTerminated(identity: id) } else { @@ -894,7 +894,7 @@ extension OpLogDistributedReceptionist { } private func receptionistTerminated(identity id: ID) { - self.pruneClusterMember(removedNode: id.uniqueNode) + self.pruneClusterMember(removedNode: id.node) } private func actorTerminated(id: ID) { @@ -963,7 +963,7 @@ extension OpLogDistributedReceptionist { return // not a new member } - guard change.node != actorSystem.cluster.uniqueNode else { + guard change.node != actorSystem.cluster.node else { return // no need to contact our own node, this would be "us" } @@ -983,7 +983,7 @@ extension OpLogDistributedReceptionist { self.replicateOpsBatch(to: remoteReceptionist) } - func pruneClusterMember(removedNode: UniqueNode) { + func pruneClusterMember(removedNode: Cluster.Node) { self.log.trace("Pruning cluster member: \(removedNode)") let terminatedReceptionistID = ActorID._receptionist(on: removedNode, for: .distributedActors) let equalityHackPeer = try! 
Self.resolve(id: terminatedReceptionistID, using: actorSystem) // try!-safe because we know the address is correct and remote diff --git a/Sources/DistributedCluster/Cluster/Reception/_OperationLogClusterReceptionistBehavior.swift b/Sources/DistributedCluster/Cluster/Reception/_OperationLogClusterReceptionistBehavior.swift index 0a6d6ab38..6b1ee51c9 100644 --- a/Sources/DistributedCluster/Cluster/Reception/_OperationLogClusterReceptionistBehavior.swift +++ b/Sources/DistributedCluster/Cluster/Reception/_OperationLogClusterReceptionistBehavior.swift @@ -183,7 +183,7 @@ extension _OperationLogClusterReceptionist { let key = message._key.asAnyKey let ref = message._addressableActorRef - guard ref.id._isLocal || (ref.id.uniqueNode == context.system.cluster.uniqueNode) else { + guard ref.id._isLocal || (ref.id.node == context.system.cluster.node) else { context.log.warning(""" Actor [\(ref)] attempted to register under key [\(key)], with NOT-local receptionist! \ Actors MUST register with their local receptionist in today's Receptionist implementation. @@ -388,7 +388,7 @@ extension _OperationLogClusterReceptionist { return // this would mean we tried to pull from a "local" receptionist, bail out } - guard self.membership.contains(receptionistAddress.uniqueNode) else { + guard self.membership.contains(receptionistAddress.node) else { // node is either not known to us yet, OR has been downed and removed // avoid talking to it until we see it in membership. return @@ -542,7 +542,7 @@ extension _OperationLogClusterReceptionist { extension _OperationLogClusterReceptionist { private func onTerminated(context: _ActorContext<_ReceptionistMessage>, terminated: _Signals.Terminated) { - if terminated.id == ActorID._receptionist(on: terminated.id.uniqueNode, for: .actorRefs) { + if terminated.id == ActorID._receptionist(on: terminated.id.node, for: .actorRefs) { context.log.debug("Watched receptionist terminated: \(terminated)") self.onReceptionistTerminated(context, terminated: terminated) } else { @@ -552,7 +552,7 @@ extension _OperationLogClusterReceptionist { } private func onReceptionistTerminated(_ context: _ActorContext, terminated: _Signals.Terminated) { - self.pruneClusterMember(context, removedNode: terminated.id.uniqueNode) + self.pruneClusterMember(context, removedNode: terminated.id.node) } private func onActorTerminated(_ context: _ActorContext, terminated: _Signals.Terminated) { @@ -619,7 +619,7 @@ extension _OperationLogClusterReceptionist { return // not a new member } - guard change.node != context.system.cluster.uniqueNode else { + guard change.node != context.system.cluster.node else { return // no need to contact our own node, this would be "us" } @@ -639,7 +639,7 @@ extension _OperationLogClusterReceptionist { self.replicateOpsBatch(context, to: remoteReceptionist) } - private func pruneClusterMember(_ context: _ActorContext<_OperationLogClusterReceptionist.Message>, removedNode: UniqueNode) { + private func pruneClusterMember(_ context: _ActorContext<_OperationLogClusterReceptionist.Message>, removedNode: Cluster.Node) { context.log.trace("Pruning cluster member: \(removedNode)") let terminatedReceptionistAddress = ActorID._receptionist(on: removedNode, for: .actorRefs) let equalityHackPeerRef = _ActorRef(.deadLetters(.init(context.log, id: terminatedReceptionistAddress, system: nil))) diff --git a/Sources/DistributedCluster/Cluster/SWIM/ClusterMembership+Converters.swift b/Sources/DistributedCluster/Cluster/SWIM/ClusterMembership+Converters.swift index b4fc966f9..3d0b711a7 100644 
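
The regenerated `Membership.pb.swift` hunks above (and the `SWIM.pb.swift` hunks further below) all follow the same mechanical shape: each message's fields move from inline stored properties into a heap-allocated `_StorageClass`, and `decodeMessage`, `traverse` and `==` then operate on that storage inside `withExtendedLifetime`. A minimal sketch of that copy-on-write pattern, using a hypothetical `Example` value that is not part of this patch:

```swift
// Minimal sketch of the copy-on-write storage pattern used by the regenerated
// protobuf messages in this patch. `Example` and its single field are
// hypothetical; the real generated types carry their proto fields instead.
struct Example {
    fileprivate final class _StorageClass {
        var _count: UInt64 = 0

        static let defaultInstance = _StorageClass()

        private init() {}

        init(copying source: _StorageClass) {
            self._count = source._count
        }
    }

    // All instances start out sharing the default storage; the first mutation copies it.
    fileprivate var _storage = _StorageClass.defaultInstance

    var count: UInt64 {
        get { self._storage._count }
        set { self._uniqueStorage()._count = newValue }
    }

    // Copy the storage only when it is shared with another value (copy-on-write).
    fileprivate mutating func _uniqueStorage() -> _StorageClass {
        if !isKnownUniquelyReferenced(&self._storage) {
            self._storage = _StorageClass(copying: self._storage)
        }
        return self._storage
    }
}

let a = Example()
var b = a          // shares storage with `a`
b.count = 42       // triggers a copy; `a` is unaffected
assert(a.count == 0 && b.count == 42)
```

The generated `decodeMessage` and `traverse` bodies then read and write `_storage._field` directly, which is why the diff touches every field access in these messages.
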
--- a/Sources/DistributedCluster/Cluster/SWIM/ClusterMembership+Converters.swift +++ b/Sources/DistributedCluster/Cluster/SWIM/ClusterMembership+Converters.swift @@ -18,21 +18,21 @@ import Logging import SWIM extension ClusterMembership.Node { - init(uniqueNode: UniqueNode) { + init(node: Cluster.Node) { self.init( - protocol: uniqueNode.node.protocol, - name: uniqueNode.node.systemName, - host: uniqueNode.host, - port: uniqueNode.port, - uid: uniqueNode.nid.value + protocol: node.endpoint.protocol, + name: node.endpoint.systemName, + host: node.host, + port: node.port, + uid: node.nid.value ) } func swimShell(_ system: ClusterSystem) -> SWIMActor { - try! SWIMActor.resolve(id: ._swim(on: self.asUniqueNode!), using: system) // TODO: the ! is not so nice + try! SWIMActor.resolve(id: ._swim(on: self.asClusterNode!), using: system) // TODO: the ! is not so nice } - var asUniqueNode: UniqueNode? { + var asClusterNode: Cluster.Node? { guard let uid = self.uid else { return nil } @@ -40,13 +40,13 @@ extension ClusterMembership.Node { return .init(protocol: self.protocol, systemName: self.name ?? "", host: self.host, port: self.port, nid: .init(uid)) } - var asNode: DistributedCluster.Node { + var asNode: DistributedCluster.Cluster.Endpoint { .init(protocol: self.protocol, systemName: self.name ?? "", host: self.host, port: self.port) } } -extension UniqueNode { +extension Cluster.Node { var asSWIMNode: ClusterMembership.Node { - .init(protocol: self.node.protocol, name: self.node.systemName, host: self.node.host, port: self.port, uid: self.nid.value) + .init(protocol: self.endpoint.protocol, name: self.endpoint.systemName, host: self.endpoint.host, port: self.port, uid: self.nid.value) } } diff --git a/Sources/DistributedCluster/Cluster/SWIM/Protobuf/SWIM+Serialization.swift b/Sources/DistributedCluster/Cluster/SWIM/Protobuf/SWIM+Serialization.swift index 8f0d7de2c..4e1392c04 100644 --- a/Sources/DistributedCluster/Cluster/SWIM/Protobuf/SWIM+Serialization.swift +++ b/Sources/DistributedCluster/Cluster/SWIM/Protobuf/SWIM+Serialization.swift @@ -168,18 +168,18 @@ extension SWIM.PingResponse: _ProtobufRepresentable { } extension ClusterMembership.Node: _ProtobufRepresentable { - public typealias ProtobufRepresentation = _ProtoUniqueNode + public typealias ProtobufRepresentation = _ProtoClusterNode public func toProto(context: Serialization.Context) throws -> ProtobufRepresentation { var proto = ProtobufRepresentation() - var protoNode = _ProtoNode() - protoNode.protocol = self.protocol + var protoEndpoint = _ProtoClusterEndpoint() + protoEndpoint.protocol = self.protocol if let name = self.name { - protoNode.system = name + protoEndpoint.system = name } - protoNode.hostname = self.host - protoNode.port = UInt32(self.port) - proto.node = protoNode + protoEndpoint.hostname = self.host + protoEndpoint.port = UInt32(self.port) + proto.endpoint = protoEndpoint if let uid = self.uid { proto.nid = uid } @@ -187,10 +187,10 @@ extension ClusterMembership.Node: _ProtobufRepresentable { } public init(fromProto proto: ProtobufRepresentation, context: Serialization.Context) throws { - guard proto.hasNode else { - throw SerializationError(.missingField("node", type: String(describing: Node.self))) + guard proto.hasEndpoint else { + throw SerializationError(.missingField("endpoint", type: String(describing: Cluster.Endpoint.self))) } - let protoNode: _ProtoNode = proto.node + let protoNode: _ProtoClusterEndpoint = proto.endpoint let `protocol` = protoNode.protocol let name: String? 
if protoNode.protocol != "" { diff --git a/Sources/DistributedCluster/Cluster/SWIM/Protobuf/SWIM.pb.swift b/Sources/DistributedCluster/Cluster/SWIM/Protobuf/SWIM.pb.swift index 7befe53d8..cadaa45f0 100644 --- a/Sources/DistributedCluster/Cluster/SWIM/Protobuf/SWIM.pb.swift +++ b/Sources/DistributedCluster/Cluster/SWIM/Protobuf/SWIM.pb.swift @@ -1,5 +1,4 @@ // DO NOT EDIT. -// swift-format-ignore-file // // Generated by the Swift generator plugin for the protocol buffer compiler. // Source: Cluster/SWIM/SWIM.proto @@ -27,7 +26,7 @@ import SwiftProtobuf // If the compiler emits an error on this type, it is because this file // was generated by a version of the `protoc` Swift plug-in that is // incompatible with the version of SwiftProtobuf to which you are linking. -// Please ensure that you are building against the same version of the API +// Please ensure that your are building against the same version of the API // that was used to generate this file. fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} @@ -39,22 +38,25 @@ public struct _ProtoSWIMPingResponse { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - public var pingResponse: _ProtoSWIMPingResponse.OneOf_PingResponse? = nil + public var pingResponse: OneOf_PingResponse? { + get {return _storage._pingResponse} + set {_uniqueStorage()._pingResponse = newValue} + } public var ack: _ProtoSWIMPingResponse.Ack { get { - if case .ack(let v)? = pingResponse {return v} + if case .ack(let v)? = _storage._pingResponse {return v} return _ProtoSWIMPingResponse.Ack() } - set {pingResponse = .ack(newValue)} + set {_uniqueStorage()._pingResponse = .ack(newValue)} } public var nack: _ProtoSWIMPingResponse.Nack { get { - if case .nack(let v)? = pingResponse {return v} + if case .nack(let v)? = _storage._pingResponse {return v} return _ProtoSWIMPingResponse.Nack() } - set {pingResponse = .nack(newValue)} + set {_uniqueStorage()._pingResponse = .nack(newValue)} } public var unknownFields = SwiftProtobuf.UnknownStorage() @@ -65,18 +67,9 @@ public struct _ProtoSWIMPingResponse { #if !swift(>=4.1) public static func ==(lhs: _ProtoSWIMPingResponse.OneOf_PingResponse, rhs: _ProtoSWIMPingResponse.OneOf_PingResponse) -> Bool { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch (lhs, rhs) { - case (.ack, .ack): return { - guard case .ack(let l) = lhs, case .ack(let r) = rhs else { preconditionFailure() } - return l == r - }() - case (.nack, .nack): return { - guard case .nack(let l) = lhs, case .nack(let r) = rhs else { preconditionFailure() } - return l == r - }() + case (.ack(let l), .ack(let r)): return l == r + case (.nack(let l), .nack(let r)): return l == r default: return false } } @@ -89,33 +82,38 @@ public struct _ProtoSWIMPingResponse { // methods supported on all messages. public var target: _ProtoActorID { - get {return _target ?? _ProtoActorID()} - set {_target = newValue} + get {return _storage._target ?? _ProtoActorID()} + set {_uniqueStorage()._target = newValue} } /// Returns true if `target` has been explicitly set. - public var hasTarget: Bool {return self._target != nil} + public var hasTarget: Bool {return _storage._target != nil} /// Clears the value of `target`. 
Subsequent reads from it will return its default value. - public mutating func clearTarget() {self._target = nil} + public mutating func clearTarget() {_uniqueStorage()._target = nil} - public var incarnation: UInt64 = 0 + public var incarnation: UInt64 { + get {return _storage._incarnation} + set {_uniqueStorage()._incarnation = newValue} + } public var payload: _ProtoSWIMGossipPayload { - get {return _payload ?? _ProtoSWIMGossipPayload()} - set {_payload = newValue} + get {return _storage._payload ?? _ProtoSWIMGossipPayload()} + set {_uniqueStorage()._payload = newValue} } /// Returns true if `payload` has been explicitly set. - public var hasPayload: Bool {return self._payload != nil} + public var hasPayload: Bool {return _storage._payload != nil} /// Clears the value of `payload`. Subsequent reads from it will return its default value. - public mutating func clearPayload() {self._payload = nil} + public mutating func clearPayload() {_uniqueStorage()._payload = nil} - public var sequenceNumber: UInt32 = 0 + public var sequenceNumber: UInt32 { + get {return _storage._sequenceNumber} + set {_uniqueStorage()._sequenceNumber = newValue} + } public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _target: _ProtoActorID? = nil - fileprivate var _payload: _ProtoSWIMGossipPayload? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct Nack { @@ -124,24 +122,29 @@ public struct _ProtoSWIMPingResponse { // methods supported on all messages. public var target: _ProtoActorID { - get {return _target ?? _ProtoActorID()} - set {_target = newValue} + get {return _storage._target ?? _ProtoActorID()} + set {_uniqueStorage()._target = newValue} } /// Returns true if `target` has been explicitly set. - public var hasTarget: Bool {return self._target != nil} + public var hasTarget: Bool {return _storage._target != nil} /// Clears the value of `target`. Subsequent reads from it will return its default value. - public mutating func clearTarget() {self._target = nil} + public mutating func clearTarget() {_uniqueStorage()._target = nil} - public var sequenceNumber: UInt32 = 0 + public var sequenceNumber: UInt32 { + get {return _storage._sequenceNumber} + set {_uniqueStorage()._sequenceNumber = newValue} + } public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _target: _ProtoActorID? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public init() {} + + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoSWIMStatus { @@ -153,7 +156,7 @@ public struct _ProtoSWIMStatus { public var incarnation: UInt64 = 0 - public var suspectedBy: [_ProtoUniqueNode] = [] + public var suspectedBy: [_ProtoClusterNode] = [] public var unknownFields = SwiftProtobuf.UnknownStorage() @@ -218,31 +221,33 @@ public struct _ProtoSWIMMember { // methods supported on all messages. public var id: _ProtoActorID { - get {return _id ?? _ProtoActorID()} - set {_id = newValue} + get {return _storage._id ?? _ProtoActorID()} + set {_uniqueStorage()._id = newValue} } /// Returns true if `id` has been explicitly set. - public var hasID: Bool {return self._id != nil} + public var hasID: Bool {return _storage._id != nil} /// Clears the value of `id`. Subsequent reads from it will return its default value. - public mutating func clearID() {self._id = nil} + public mutating func clearID() {_uniqueStorage()._id = nil} public var status: _ProtoSWIMStatus { - get {return _status ?? 
_ProtoSWIMStatus()} - set {_status = newValue} + get {return _storage._status ?? _ProtoSWIMStatus()} + set {_uniqueStorage()._status = newValue} } /// Returns true if `status` has been explicitly set. - public var hasStatus: Bool {return self._status != nil} + public var hasStatus: Bool {return _storage._status != nil} /// Clears the value of `status`. Subsequent reads from it will return its default value. - public mutating func clearStatus() {self._status = nil} + public mutating func clearStatus() {_uniqueStorage()._status = nil} - public var protocolPeriod: UInt64 = 0 + public var protocolPeriod: UInt64 { + get {return _storage._protocolPeriod} + set {_uniqueStorage()._protocolPeriod = newValue} + } public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _id: _ProtoActorID? = nil - fileprivate var _status: _ProtoSWIMStatus? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoSWIMGossipPayload { @@ -266,63 +271,75 @@ extension _ProtoSWIMPingResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageI 2: .same(proto: "nack"), ] + fileprivate class _StorageClass { + var _pingResponse: _ProtoSWIMPingResponse.OneOf_PingResponse? + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _pingResponse = source._pingResponse + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { - var v: _ProtoSWIMPingResponse.Ack? - var hadOneofValue = false - if let current = self.pingResponse { - hadOneofValue = true - if case .ack(let m) = current {v = m} + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: + var v: _ProtoSWIMPingResponse.Ack? + if let current = _storage._pingResponse { + try decoder.handleConflictingOneOf() + if case .ack(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._pingResponse = .ack(v)} + case 2: + var v: _ProtoSWIMPingResponse.Nack? + if let current = _storage._pingResponse { + try decoder.handleConflictingOneOf() + if case .nack(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._pingResponse = .nack(v)} + default: break } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.pingResponse = .ack(v) - } - }() - case 2: try { - var v: _ProtoSWIMPingResponse.Nack? 
- var hadOneofValue = false - if let current = self.pingResponse { - hadOneofValue = true - if case .nack(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.pingResponse = .nack(v) - } - }() - default: break } } } public func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch self.pingResponse { - case .ack?: try { - guard case .ack(let v)? = self.pingResponse else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - }() - case .nack?: try { - guard case .nack(let v)? = self.pingResponse else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - }() - case nil: break + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + switch _storage._pingResponse { + case .ack(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + case .nack(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + case nil: break + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoSWIMPingResponse, rhs: _ProtoSWIMPingResponse) -> Bool { - if lhs.pingResponse != rhs.pingResponse {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._pingResponse != rhs_storage._pingResponse {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -337,42 +354,77 @@ extension _ProtoSWIMPingResponse.Ack: SwiftProtobuf.Message, SwiftProtobuf._Mess 4: .same(proto: "sequenceNumber"), ] + fileprivate class _StorageClass { + var _target: _ProtoActorID? = nil + var _incarnation: UInt64 = 0 + var _payload: _ProtoSWIMGossipPayload? = nil + var _sequenceNumber: UInt32 = 0 + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _target = source._target + _incarnation = source._incarnation + _payload = source._payload + _sequenceNumber = source._sequenceNumber + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._target) }() - case 2: try { try decoder.decodeSingularUInt64Field(value: &self.incarnation) }() - case 3: try { try decoder.decodeSingularMessageField(value: &self._payload) }() - case 4: try { try decoder.decodeSingularUInt32Field(value: &self.sequenceNumber) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._target) + case 2: try decoder.decodeSingularUInt64Field(value: &_storage._incarnation) + case 3: try decoder.decodeSingularMessageField(value: &_storage._payload) + case 4: try decoder.decodeSingularUInt32Field(value: &_storage._sequenceNumber) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._target { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if self.incarnation != 0 { - try visitor.visitSingularUInt64Field(value: self.incarnation, fieldNumber: 2) - } - if let v = self._payload { - try visitor.visitSingularMessageField(value: v, fieldNumber: 3) - } - if self.sequenceNumber != 0 { - try visitor.visitSingularUInt32Field(value: self.sequenceNumber, fieldNumber: 4) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._target { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if _storage._incarnation != 0 { + try visitor.visitSingularUInt64Field(value: _storage._incarnation, fieldNumber: 2) + } + if let v = _storage._payload { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } + if _storage._sequenceNumber != 0 { + try visitor.visitSingularUInt32Field(value: _storage._sequenceNumber, fieldNumber: 4) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoSWIMPingResponse.Ack, rhs: _ProtoSWIMPingResponse.Ack) -> Bool { - if lhs._target != rhs._target {return false} - if lhs.incarnation != rhs.incarnation {return false} - if lhs._payload != rhs._payload {return false} - if lhs.sequenceNumber != rhs.sequenceNumber {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._target != rhs_storage._target {return false} + if _storage._incarnation != rhs_storage._incarnation {return false} + if _storage._payload != rhs_storage._payload {return false} + if _storage._sequenceNumber != rhs_storage._sequenceNumber {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -385,32 +437,63 @@ extension _ProtoSWIMPingResponse.Nack: SwiftProtobuf.Message, SwiftProtobuf._Mes 2: .same(proto: "sequenceNumber"), ] + fileprivate class _StorageClass { + var _target: _ProtoActorID? 
= nil + var _sequenceNumber: UInt32 = 0 + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _target = source._target + _sequenceNumber = source._sequenceNumber + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._target) }() - case 2: try { try decoder.decodeSingularUInt32Field(value: &self.sequenceNumber) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._target) + case 2: try decoder.decodeSingularUInt32Field(value: &_storage._sequenceNumber) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._target { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if self.sequenceNumber != 0 { - try visitor.visitSingularUInt32Field(value: self.sequenceNumber, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._target { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if _storage._sequenceNumber != 0 { + try visitor.visitSingularUInt32Field(value: _storage._sequenceNumber, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoSWIMPingResponse.Nack, rhs: _ProtoSWIMPingResponse.Nack) -> Bool { - if lhs._target != rhs._target {return false} - if lhs.sequenceNumber != rhs.sequenceNumber {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._target != rhs_storage._target {return false} + if _storage._sequenceNumber != rhs_storage._sequenceNumber {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -426,13 +509,10 @@ extension _ProtoSWIMStatus: SwiftProtobuf.Message, SwiftProtobuf._MessageImpleme public mutating func decodeMessage(decoder: inout D) throws { while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeSingularEnumField(value: &self.type) }() - case 2: try { try decoder.decodeSingularUInt64Field(value: &self.incarnation) }() - case 3: try { try decoder.decodeRepeatedMessageField(value: &self.suspectedBy) }() + case 1: try decoder.decodeSingularEnumField(value: &self.type) + case 2: try decoder.decodeSingularUInt64Field(value: &self.incarnation) + case 3: try decoder.decodeRepeatedMessageField(value: &self.suspectedBy) default: break } } @@ -478,37 +558,70 @@ extension _ProtoSWIMMember: SwiftProtobuf.Message, SwiftProtobuf._MessageImpleme 3: .same(proto: "protocolPeriod"), ] + fileprivate class _StorageClass { + var _id: _ProtoActorID? = nil + var _status: _ProtoSWIMStatus? = nil + var _protocolPeriod: UInt64 = 0 + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _id = source._id + _status = source._status + _protocolPeriod = source._protocolPeriod + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._id) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._status) }() - case 3: try { try decoder.decodeSingularUInt64Field(value: &self.protocolPeriod) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._id) + case 2: try decoder.decodeSingularMessageField(value: &_storage._status) + case 3: try decoder.decodeSingularUInt64Field(value: &_storage._protocolPeriod) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._id { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if let v = self._status { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - } - if self.protocolPeriod != 0 { - try visitor.visitSingularUInt64Field(value: self.protocolPeriod, fieldNumber: 3) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._id { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if let v = _storage._status { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } + if _storage._protocolPeriod != 0 { + try visitor.visitSingularUInt64Field(value: _storage._protocolPeriod, fieldNumber: 3) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoSWIMMember, rhs: _ProtoSWIMMember) -> Bool { - if lhs._id != rhs._id {return false} - if lhs._status != rhs._status {return false} - if lhs.protocolPeriod != rhs.protocolPeriod {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._id != 
rhs_storage._id {return false} + if _storage._status != rhs_storage._status {return false} + if _storage._protocolPeriod != rhs_storage._protocolPeriod {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -522,11 +635,8 @@ extension _ProtoSWIMGossipPayload: SwiftProtobuf.Message, SwiftProtobuf._Message public mutating func decodeMessage(decoder: inout D) throws { while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeRepeatedMessageField(value: &self.member) }() + case 1: try decoder.decodeRepeatedMessageField(value: &self.member) default: break } } diff --git a/Sources/DistributedCluster/Cluster/SWIM/SWIMActor.swift b/Sources/DistributedCluster/Cluster/SWIM/SWIMActor.swift index dbc4c85c4..8c6b0fb9d 100644 --- a/Sources/DistributedCluster/Cluster/SWIM/SWIMActor.swift +++ b/Sources/DistributedCluster/Cluster/SWIM/SWIMActor.swift @@ -35,10 +35,10 @@ internal distributed actor SWIMActor: SWIMPeer, SWIMAddressablePeer, CustomStrin nonisolated var swimNode: ClusterMembership.Node { .init( - protocol: self.id.uniqueNode.node.protocol, - host: self.id.uniqueNode.host, - port: self.id.uniqueNode.port, - uid: self.id.uniqueNode.nid.value + protocol: self.id.node.endpoint.protocol, + host: self.id.node.host, + port: self.id.node.port, + uid: self.id.node.nid.value ) } @@ -363,12 +363,12 @@ internal distributed actor SWIMActor: SWIMPeer, SWIMAddressablePeer, CustomStrin reachability = .unreachable } - guard let uniqueNode = change.member.node.asUniqueNode else { - self.log.warning("Unable to emit failureDetectorReachabilityChanged, for event: \(change), since can't represent member as uniqueNode!") + guard let node = change.member.node.asClusterNode else { + self.log.warning("Unable to emit failureDetectorReachabilityChanged, for event: \(change), since can't represent member as node!") return } - self.clusterRef.tell(.command(.failureDetectorReachabilityChanged(uniqueNode, reachability))) + self.clusterRef.tell(.command(.failureDetectorReachabilityChanged(node, reachability))) } private func handleGossipPayloadProcessedDirective(_ directive: SWIM.Instance.GossipProcessedDirective) { @@ -503,15 +503,15 @@ internal distributed actor SWIMActor: SWIMPeer, SWIMAddressablePeer, CustomStrin // ==== ------------------------------------------------------------------------------------------------------------ // MARK: Local functions - func monitor(node: UniqueNode) { - guard self.actorSystem.cluster.uniqueNode.node != node.node else { + func monitor(node: Cluster.Node) { + guard self.actorSystem.cluster.node.endpoint != node.endpoint else { return // no need to monitor ourselves, nor a replacement of us (if node is our replacement, we should have been dead already) } self.sendFirstRemotePing(on: node) } - nonisolated func confirmDead(node: UniqueNode) { + nonisolated func confirmDead(node: Cluster.Node) { Task { await self.whenLocal { __secretlyKnownToBeLocal in // TODO(distributed): rename once https://github.com/apple/swift/pull/42098 is implemented let directive = __secretlyKnownToBeLocal.swim.confirmDead(peer: node.asSWIMNode.swimShell(__secretlyKnownToBeLocal.actorSystem)) @@ -526,8 +526,8 @@ internal distributed actor SWIMActor: SWIMPeer, 
SWIMAddressablePeer, CustomStrin } /// This is effectively joining the SWIM membership of the other member. - private func sendFirstRemotePing(on targetUniqueNode: UniqueNode) { - let targetNode = ClusterMembership.Node(uniqueNode: targetUniqueNode) + private func sendFirstRemotePing(on targetUniqueNode: Cluster.Node) { + let targetNode = ClusterMembership.Node(node: targetUniqueNode) let targetPeer = targetNode.swimShell(self.actorSystem) // FIXME: expose addMember after all @@ -581,7 +581,7 @@ extension SWIMActor { } extension ActorID { - static func _swim(on node: UniqueNode) -> ActorID { + static func _swim(on node: Cluster.Node) -> ActorID { .init(remote: node, path: ActorPath._swim, incarnation: .wellKnown) } } diff --git a/Sources/DistributedCluster/Cluster/SystemMessages+Redelivery.swift b/Sources/DistributedCluster/Cluster/SystemMessages+Redelivery.swift index 5c9b38d40..6bd8287a1 100644 --- a/Sources/DistributedCluster/Cluster/SystemMessages+Redelivery.swift +++ b/Sources/DistributedCluster/Cluster/SystemMessages+Redelivery.swift @@ -25,7 +25,7 @@ import struct NIO.CircularBuffer /// which is used to drive re-delivery of system messages. System messages MUST NOT be dropped, and MUST /// be delivered in order, thus the re-delivery and local-delivery to the target actors is always done in /// sequence and without gaps. Redelivery also survives if a connection is borked and established anew with -/// the same `UniqueNode`. +/// the same `Cluster.Node`. /// /// Sequence numbers start from `1`, since zero is reserved for "no system messages were received/sent yet." /// diff --git a/Sources/DistributedCluster/Cluster/Transport/RemoteClusterActorPersonality.swift b/Sources/DistributedCluster/Cluster/Transport/RemoteClusterActorPersonality.swift index 2a40a76cf..99a1287c2 100644 --- a/Sources/DistributedCluster/Cluster/Transport/RemoteClusterActorPersonality.swift +++ b/Sources/DistributedCluster/Cluster/Transport/RemoteClusterActorPersonality.swift @@ -77,7 +77,7 @@ public final class _RemoteClusterActorPersonality { // Ensure we store as .remote, so printouts work as expected (and include the explicit address) var id = id - id._location = .remote(id.uniqueNode) + id._location = .remote(id.node) self.id = id self.clusterShell = shell @@ -129,7 +129,7 @@ public final class _RemoteClusterActorPersonality { return .association(assoc) } - let associationState = self.clusterShell.getEnsureAssociation(with: self.id.uniqueNode) + let associationState = self.clusterShell.getEnsureAssociation(with: self.id.node) switch associationState { case .association(let assoc): return .association(self._cachedAssociation.storeIfNilThenLoad(assoc)) diff --git a/Sources/DistributedCluster/Cluster/Transport/TransportPipelines.swift b/Sources/DistributedCluster/Cluster/Transport/TransportPipelines.swift index 8f46620df..e958e36f5 100644 --- a/Sources/DistributedCluster/Cluster/Transport/TransportPipelines.swift +++ b/Sources/DistributedCluster/Cluster/Transport/TransportPipelines.swift @@ -93,7 +93,7 @@ private final class InitiatingHandshakeHandler: ChannelInboundHandler, Removable } } catch { self.log.debug("Handshake failure, error [\(error)]:\(String(reflecting: type(of: error)))", metadata: metadata) - self.cluster.tell(.inbound(.handshakeFailed(self.handshakeOffer.targetNode, error))) + self.cluster.tell(.inbound(.handshakeFailed(self.handshakeOffer.targetEndpoint, error))) _ = context.close(mode: .all) } } @@ -114,9 +114,9 @@ final class ReceivingHandshakeHandler: ChannelInboundHandler, 
RemovableChannelHa private let log: Logger private let cluster: ClusterShell.Ref - private let localNode: UniqueNode + private let localNode: Cluster.Node - init(log: Logger, cluster: ClusterShell.Ref, localNode: UniqueNode) { + init(log: Logger, cluster: ClusterShell.Ref, localNode: Cluster.Node) { self.log = log self.cluster = cluster self.localNode = localNode @@ -415,7 +415,7 @@ internal final class SystemMessageRedeliveryHandler: ChannelDuplexHandler { self.log.error("Outbound system message queue overflow! MUST abort association, system state integrity cannot be ensured (e.g. terminated signals may have been lost).", metadata: [ "recipient": "\(transportEnvelope.recipient)", ]) - self.clusterShell.tell(.command(.downCommand(transportEnvelope.recipient.uniqueNode.node))) + self.clusterShell.tell(.command(.downCommand(transportEnvelope.recipient.node.endpoint))) } } @@ -706,7 +706,7 @@ private final class DumpRawBytesDebugHandler: ChannelInboundHandler { // MARK: "Server side" / accepting connections extension ClusterShell { - internal func bootstrapServerSide(system: ClusterSystem, shell: ClusterShell.Ref, bindAddress: UniqueNode, settings: ClusterSystemSettings, serializationPool: _SerializationPool) -> EventLoopFuture { + internal func bootstrapServerSide(system: ClusterSystem, shell: ClusterShell.Ref, bindNode: Cluster.Node, settings: ClusterSystemSettings, serializationPool: _SerializationPool) -> EventLoopFuture { let group: EventLoopGroup = settings.eventLoopGroup ?? settings.makeDefaultEventLoopGroup() // TODO: share the loop with client side? let bootstrap = ServerBootstrap(group: group) @@ -746,7 +746,7 @@ extension ClusterShell { ("magic validator", ProtocolMagicBytesValidator()), ("framing writer", LengthFieldPrepender(lengthFieldLength: .four, lengthFieldEndianness: .big)), ("framing reader", ByteToMessageHandler(Framing(lengthFieldLength: .four, lengthFieldEndianness: .big))), - ("receiving handshake handler", ReceivingHandshakeHandler(log: log, cluster: shell, localNode: bindAddress)), + ("receiving handshake handler", ReceivingHandshakeHandler(log: log, cluster: shell, localNode: bindNode)), // ("bytes dumper", DumpRawBytesDebugHandler(role: .server, log: log)), // FIXME: only include for debug -DSACT_TRACE_NIO things? ("wire envelope handler", WireEnvelopeHandler(serialization: serializationPool.serialization, log: log)), ("outbound serialization handler", OutboundSerializationHandler(log: log, serializationPool: serializationPool)), @@ -766,10 +766,10 @@ extension ClusterShell { .childChannelOption(ChannelOptions.maxMessagesPerRead, value: 16) .childChannelOption(ChannelOptions.recvAllocator, value: AdaptiveRecvByteBufferAllocator()) - return bootstrap.bind(host: bindAddress.node.host, port: Int(bindAddress.node.port)) // TODO: separate setup from using it + return bootstrap.bind(host: bindNode.endpoint.host, port: Int(bindNode.endpoint.port)) // TODO: separate setup from using it } - internal func bootstrapClientSide(system: ClusterSystem, shell: ClusterShell.Ref, targetNode: Node, handshakeOffer: Wire.HandshakeOffer, settings: ClusterSystemSettings, serializationPool: _SerializationPool) -> EventLoopFuture { + internal func bootstrapClientSide(system: ClusterSystem, shell: ClusterShell.Ref, targetNode: Cluster.Endpoint, handshakeOffer: Wire.HandshakeOffer, settings: ClusterSystemSettings, serializationPool: _SerializationPool) -> EventLoopFuture { let group: EventLoopGroup = settings.eventLoopGroup ?? 
settings.makeDefaultEventLoopGroup() // TODO: Implement "setup" inside settings, so that parts of bootstrap can be done there, e.g. by end users without digging into remoting internals diff --git a/Sources/DistributedCluster/Cluster/Transport/WireMessages.swift b/Sources/DistributedCluster/Cluster/Transport/WireMessages.swift index 884165625..dccfbfb81 100644 --- a/Sources/DistributedCluster/Cluster/Transport/WireMessages.swift +++ b/Sources/DistributedCluster/Cluster/Transport/WireMessages.swift @@ -42,8 +42,8 @@ internal enum Wire { internal struct HandshakeOffer: Equatable, WireMessage { internal var version: Version - internal var originNode: UniqueNode - internal var targetNode: Node + internal var originNode: Cluster.Node + internal var targetEndpoint: Cluster.Endpoint } internal enum HandshakeResponse: WireMessage { @@ -66,19 +66,19 @@ internal enum Wire { /// The node accepting the handshake. /// /// This will always be the "local" node where the accept is being made. - internal let targetNode: UniqueNode + internal let targetNode: Cluster.Node /// In order to avoid confusion with from/to, we name the `origin` the node which an *offer* was sent from, /// and we now reply to this handshake to it. This value is carried so the origin can confirm it indeed was /// intended for it, and not a previous incarnation of a system on the same network address. /// /// This will always be the "remote" node, with regards to where the accept is created. - internal let originNode: UniqueNode + internal let originNode: Cluster.Node /// MUST be called after the reply is written to the wire; triggers messages being flushed from the association. internal var onHandshakeReplySent: (() -> Void)? - init(version: Version, targetNode: UniqueNode, originNode: UniqueNode, whenHandshakeReplySent: (() -> Void)?) { + init(version: Version, targetNode: Cluster.Node, originNode: Cluster.Node, whenHandshakeReplySent: (() -> Void)?) { self.version = version self.targetNode = targetNode self.originNode = originNode @@ -91,13 +91,13 @@ internal enum Wire { internal let version: Version internal let reason: String - internal let targetNode: UniqueNode - internal let originNode: UniqueNode + internal let targetNode: Cluster.Node + internal let originNode: Cluster.Node /// MUST be called after the reply is written to the wire; triggers messages being flushed from the association. internal let onHandshakeReplySent: (() -> Void)? - init(version: Wire.Version, targetNode: UniqueNode, originNode: UniqueNode, reason: String, whenHandshakeReplySent: (() -> Void)?) { + init(version: Wire.Version, targetNode: Cluster.Node, originNode: Cluster.Node, reason: String, whenHandshakeReplySent: (() -> Void)?) { self.version = version self.targetNode = targetNode self.originNode = originNode diff --git a/Sources/DistributedCluster/ClusterEndpoint.swift b/Sources/DistributedCluster/ClusterEndpoint.swift new file mode 100644 index 000000000..f77c37d61 --- /dev/null +++ b/Sources/DistributedCluster/ClusterEndpoint.swift @@ -0,0 +1,88 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Distributed Actors open source project +// +// Copyright (c) 2018-2022 Apple Inc. 
and the Swift Distributed Actors project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.md for the list of Swift Distributed Actors project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +import Distributed +import Foundation + +// ==== ---------------------------------------------------------------------------------------------------------------- +// MARK: Cluster.Endpoint + +extension Cluster { + /// An `Endpoint` is a triplet of protocol, host and port that a node is bound to. + /// + /// Unlike `Cluster.Node`, it does not carry identity (`Cluster.Node.ID`) of a specific incarnation of an actor system node, + /// and represents an address of _any_ node that could live under this address. During the handshake process between two nodes, + /// the remote `Node` that the local side started out to connect with is "upgraded" to a `Cluster.Node`, as soon as we discover + /// the remote side's unique node identifier (`Cluster.Node.ID`). + /// + /// ### System name / human readable name + /// The `systemName` is NOT taken into account when comparing nodes. The system name is only utilized for human readability + /// and debugging purposes and participates neither in hashcode nor equality of a `Node`, as a node specifically is meant + /// to represent any unique node that can live on specific host & port. System names are useful for human operators, + /// intending to use some form of naming scheme, e.g. adopted from a cloud provider, to make it easier to map nodes in + /// actor system logs, to other external systems. + /// + /// - SeeAlso: For more details on unique node ids, refer to: ``Cluster.Node``. + public struct Endpoint: Hashable, Sendable { + // TODO: collapse into one String and index into it? + public var `protocol`: String + public var systemName: String // TODO: some other name, to signify "this is just for humans"? + public var host: String + public var port: Int + + public init(protocol: String, systemName: String, host: String, port: Int) { + precondition(port > 0, "port MUST be > 0") + self.protocol = `protocol` + self.systemName = systemName + self.host = host + self.port = port + } + + public init(systemName: String, host: String, port: Int) { + self.init(protocol: "sact", systemName: systemName, host: host, port: port) + } + + public init(host: String, port: Int) { + self.init(protocol: "sact", systemName: "", host: host, port: port) + } + } +} + +extension Cluster.Endpoint: CustomStringConvertible, CustomDebugStringConvertible { + public var description: String { + "\(self.protocol)://\(self.systemName)@\(self.host):\(self.port)" + } + + public var debugDescription: String { + self.description + } +} + +extension Cluster.Endpoint: Comparable { + // Silly but good enough comparison for deciding "who is lower node" + // as we only use those for "tie-breakers" any ordering is fine to be honest here. 
+ public static func < (lhs: Cluster.Endpoint, rhs: Cluster.Endpoint) -> Bool { + "\(lhs)" < "\(rhs)" + } + + public func hash(into hasher: inout Hasher) { + hasher.combine(self.protocol) + hasher.combine(self.host) + hasher.combine(self.port) + } + + public static func == (lhs: Cluster.Endpoint, rhs: Cluster.Endpoint) -> Bool { + lhs.protocol == rhs.protocol && lhs.host == rhs.host && lhs.port == rhs.port + } +} diff --git a/Sources/DistributedCluster/ClusterNode.swift b/Sources/DistributedCluster/ClusterNode.swift new file mode 100644 index 000000000..28671e705 --- /dev/null +++ b/Sources/DistributedCluster/ClusterNode.swift @@ -0,0 +1,136 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Distributed Actors open source project +// +// Copyright (c) 2018-2022 Apple Inc. and the Swift Distributed Actors project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.md for the list of Swift Distributed Actors project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +import Distributed +import Foundation + +// ==== ---------------------------------------------------------------------------------------------------------------- +// MARK: Cluster.Node + +extension Cluster { + /// A _unique_ node which includes also the node's unique `UID` which is used to disambiguate + /// multiple incarnations of a system on the same host/port part -- similar to how an `ActorIncarnation` + /// is used on the per-actor level. + /// + /// ### Implementation details + /// The unique address of a remote node can only be obtained by performing the handshake with it. + /// Once the remote node accepts our handshake, it offers the other node its unique address. + /// Only once this address has been obtained can a node communicate with actors located on the remote node. 
+ public struct Node: Hashable, Sendable { + public typealias ID = NodeID + + public var endpoint: Cluster.Endpoint + public let nid: NodeID + + public init(endpoint: Cluster.Endpoint, nid: NodeID) { + precondition(endpoint.port > 0, "port MUST be > 0") + self.endpoint = endpoint + self.nid = nid + } + + public init(protocol: String, systemName: String, host: String, port: Int, nid: NodeID) { + self.init(endpoint: Cluster.Endpoint(protocol: `protocol`, systemName: systemName, host: host, port: port), nid: nid) + } + + public init(systemName: String, host: String, port: Int, nid: NodeID) { + self.init(protocol: "sact", systemName: systemName, host: host, port: port, nid: nid) + } + + public var systemName: String { + set { + self.endpoint.systemName = newValue + } + get { + self.endpoint.systemName + } + } + + public var host: String { + set { + self.endpoint.host = newValue + } + get { + self.endpoint.host + } + } + + public var port: Int { + set { + self.endpoint.port = newValue + } + get { + self.endpoint.port + } + } + } +} + +extension Cluster.Node: CustomStringConvertible, CustomDebugStringConvertible { + public var description: String { + "\(self.endpoint)" + } + + public var debugDescription: String { + let a = self.endpoint + return "\(a.protocol)://\(a.systemName):\(self.nid)@\(a.host):\(a.port)" + } +} + +extension Cluster.Node: Comparable { + public static func == (lhs: Cluster.Node, rhs: Cluster.Node) -> Bool { + // we first compare the NodeIDs since they're quicker to compare and for diff systems always would differ, even if on same physical address + lhs.nid == rhs.nid && lhs.endpoint == rhs.endpoint + } + + // Silly but good enough comparison for deciding "who is lower node" + // as we only use those for "tie-breakers" any ordering is fine to be honest here. + public static func < (lhs: Cluster.Node, rhs: Cluster.Node) -> Bool { + if lhs.endpoint == rhs.endpoint { + return lhs.nid < rhs.nid + } else { + return lhs.endpoint < rhs.endpoint + } + } +} + +// ==== ---------------------------------------------------------------------------------------------------------------- +// MARK: NodeID + +extension Cluster.Node { + public struct NodeID: Hashable, Sendable { + let value: UInt64 + + public init(_ value: UInt64) { + self.value = value + } + } +} + +extension Cluster.Node.ID: Comparable { + public static func < (lhs: Cluster.Node.ID, rhs: Cluster.Node.ID) -> Bool { + lhs.value < rhs.value + } +} + +extension Cluster.Node.ID: CustomStringConvertible { + public var description: String { + "\(self.value)" + } +} + +extension Cluster.Node.ID { + public static func random() -> Cluster.Node.ID { + Cluster.Node.ID(UInt64.random(in: 1 ... .max)) + } +} diff --git a/Sources/DistributedCluster/ClusterSystem.swift b/Sources/DistributedCluster/ClusterSystem.swift index 65c4e4d19..f771866da 100644 --- a/Sources/DistributedCluster/ClusterSystem.swift +++ b/Sources/DistributedCluster/ClusterSystem.swift @@ -221,7 +221,7 @@ public class ClusterSystem: DistributedActorSystem, @unchecked Sendable { /// - Faults: when configuration closure performs very illegal action, e.g. reusing a serializer identifier public convenience init(_ name: String, settings: ClusterSystemSettings) async { var settings = settings - settings.node.systemName = name + settings.endpoint.systemName = name await self.init(settings: settings) } @@ -230,7 +230,7 @@ public class ClusterSystem: DistributedActorSystem, @unchecked Sendable { /// - Faults: when configuration closure performs very illegal action, e.g. 
reusing a serializer identifier public init(settings: ClusterSystemSettings) async { var settings = settings - self.name = settings.node.systemName + self.name = settings.endpoint.systemName // rely on swift-backtrace for pretty backtraces on crashes if settings.installSwiftBacktrace { @@ -248,7 +248,7 @@ public class ClusterSystem: DistributedActorSystem, @unchecked Sendable { self.dispatcher = try! _FixedThreadPool(settings.threadPoolSize) // initialize top level guardians - self._root = TheOneWhoHasNoParent(local: settings.uniqueBindNode) + self._root = TheOneWhoHasNoParent(local: settings.bindNode) let theOne = self._root let initializationLock = ReadWriteLock() @@ -262,7 +262,7 @@ public class ClusterSystem: DistributedActorSystem, @unchecked Sendable { } if settings.enabled { - settings.logging._logger[metadataKey: "cluster/node"] = "\(settings.uniqueBindNode)" + settings.logging._logger[metadataKey: "cluster/node"] = "\(settings.bindNode)" } else { settings.logging._logger[metadataKey: "cluster/node"] = "\(self.name)" } @@ -288,11 +288,11 @@ public class ClusterSystem: DistributedActorSystem, @unchecked Sendable { _ = self._serialization.storeIfNilThenLoad(serialization) // dead letters init - self._deadLetters = _ActorRef(.deadLetters(.init(self.log, id: ActorID._deadLetters(on: settings.uniqueBindNode), system: self))) + self._deadLetters = _ActorRef(.deadLetters(.init(self.log, id: ActorID._deadLetters(on: settings.bindNode), system: self))) // actor providers - let localUserProvider = LocalActorRefProvider(root: _Guardian(parent: theOne, name: "user", localNode: settings.uniqueBindNode, system: self)) - let localSystemProvider = LocalActorRefProvider(root: _Guardian(parent: theOne, name: "system", localNode: settings.uniqueBindNode, system: self)) + let localUserProvider = LocalActorRefProvider(root: _Guardian(parent: theOne, name: "user", localNode: settings.bindNode, system: self)) + let localSystemProvider = LocalActorRefProvider(root: _Guardian(parent: theOne, name: "system", localNode: settings.bindNode, system: self)) // TODO: want to reconcile those into one, and allow /dead as well var effectiveUserProvider: _ActorRefProvider = localUserProvider var effectiveSystemProvider: _ActorRefProvider = localSystemProvider @@ -384,7 +384,7 @@ public class ClusterSystem: DistributedActorSystem, @unchecked Sendable { await self.settings.plugins.startAll(self) if settings.enabled { - self.log.info("ClusterSystem [\(self.name)] initialized, listening on: \(self.settings.uniqueBindNode): \(self.cluster.ref)") + self.log.info("ClusterSystem [\(self.name)] initialized, listening on: \(self.settings.bindNode): \(self.cluster.ref)") self.log.info("Setting in effect: .autoLeaderElection: \(self.settings.autoLeaderElection)") self.log.info("Setting in effect: .downingStrategy: \(self.settings.downingStrategy)") @@ -491,7 +491,7 @@ public class ClusterSystem: DistributedActorSystem, @unchecked Sendable { self.shutdownSemaphore.wait() /// Down this node as part of shutting down; it may have enough time to notify other nodes on an best effort basis. 
- self.cluster.down(node: self.settings.node) + self.cluster.down(endpoint: self.settings.endpoint) let pluginsSemaphore = DispatchSemaphore(value: 1) Task { @@ -569,7 +569,7 @@ extension ClusterSystem: CustomStringConvertible { var res = "ClusterSystem(" res.append(self.name) if self.settings.enabled { - res.append(", \(self.cluster.uniqueNode)") + res.append(", \(self.cluster.node)") } res.append(")") return res @@ -881,9 +881,9 @@ extension ClusterSystem { } // If the actor is not located on this node, immediately resolve as "remote" - guard self.cluster.uniqueNode == id.uniqueNode else { + guard self.cluster.node == id.node else { if self.settings.logging.verboseResolve { - self.log.trace("Resolved \(id) as remote, on node: \(id.uniqueNode)") + self.log.trace("Resolved \(id) as remote, on node: \(id.node)") } return nil } @@ -1126,7 +1126,7 @@ extension ClusterSystem { return try await interceptor.interceptRemoteCall(on: actor, target: target, invocation: &invocation, throwing: throwing, returning: returning) } - guard __isRemoteActor(actor), actor.id.uniqueNode != self.cluster.uniqueNode else { + guard __isRemoteActor(actor), actor.id.node != self.cluster.node else { // It actually is a remote call, so redirect it to local call-path. // Such calls can happen when we deal with interceptors and proxies; // To make their lives easier, we centralize the noticing when a call is local and dispatch it from here. @@ -1185,7 +1185,7 @@ extension ClusterSystem { return try await interceptor.interceptRemoteCallVoid(on: actor, target: target, invocation: &invocation, throwing: throwing) } - guard __isRemoteActor(actor), actor.id.uniqueNode != self.cluster.uniqueNode else { + guard __isRemoteActor(actor), actor.id.node != self.cluster.node else { // It actually is a remote call, so redirect it to local call-path. // Such calls can happen when we deal with interceptors and proxies; // To make their lives easier, we centralize the noticing when a call is local and dispatch it from here. 
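The call sites above decide between the local and remote code paths by comparing the actor ID's node against the system's own node. A hedged sketch of the same check from user code, assuming a hypothetical `Greeter` distributed actor and that `ActorID.node` and `system.cluster.node` remain publicly accessible after this rename:

```swift
import Distributed
import DistributedCluster

// `Greeter` is a hypothetical distributed actor used only for illustration.
distributed actor Greeter {
    typealias ActorSystem = ClusterSystem
}

func isLocal(_ greeter: Greeter, in system: ClusterSystem) -> Bool {
    // Same comparison as in the runtime above: a matching node means the actor lives on this system.
    greeter.id.node == system.cluster.node
}
```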
@@ -1318,8 +1318,8 @@ extension ClusterSystem { Res: Codable { precondition( - self.cluster.uniqueNode == actor.id.uniqueNode, - "Attempted to localCall an actor whose ID was a different node: [\(actor.id)], current node: \(self.cluster.uniqueNode)" + self.cluster.node == actor.id.node, + "Attempted to localCall an actor whose ID was a different node: [\(actor.id)], current node: \(self.cluster.node)" ) self.log.trace("Execute local call", metadata: [ "actor/id": "\(actor.id.fullDescription)", @@ -1363,8 +1363,8 @@ extension ClusterSystem { Err: Error { precondition( - self.cluster.uniqueNode == actor.id.uniqueNode, - "Attempted to localCall an actor whose ID was a different node: [\(actor.id)], current node: \(self.cluster.uniqueNode)" + self.cluster.node == actor.id.node, + "Attempted to localCall an actor whose ID was a different node: [\(actor.id)], current node: \(self.cluster.node)" ) self.log.trace("Execute local void call", metadata: [ "actor/id": "\(actor.id.fullDescription)", @@ -1462,8 +1462,8 @@ extension ClusterSystem { } // If the actor is not located on this node, immediately resolve as "remote" - guard self.cluster.uniqueNode == id.uniqueNode else { - self.log.trace("Resolve local failed, ID is for a remote host: \(id.uniqueNode)", metadata: ["actor/id": "\(id)"]) + guard self.cluster.node == id.node else { + self.log.trace("Resolve local failed, ID is for a remote host: \(id.node)", metadata: ["actor/id": "\(id)"]) return nil } @@ -1799,7 +1799,7 @@ public struct RemoteCallError: DistributedActorSystemError, CustomStringConverti } internal init(_ error: _RemoteCallError, file: String = #fileID, line: UInt = #line) { - let actorID = ActorID._deadLetters(on: UniqueNode.init(protocol: "dead", systemName: "", host: "", port: 1, nid: .init(0))) + let actorID = ActorID._deadLetters(on: Cluster.Node.init(protocol: "dead", systemName: "", host: "", port: 1, nid: .init(0))) let target = RemoteCallTarget("") self.underlying = _Storage(error: error, actorID: actorID, target: target, file: file, line: line) } diff --git a/Sources/DistributedCluster/ClusterSystemSettings.swift b/Sources/DistributedCluster/ClusterSystemSettings.swift index e66c6a0de..330f3229d 100644 --- a/Sources/DistributedCluster/ClusterSystemSettings.swift +++ b/Sources/DistributedCluster/ClusterSystemSettings.swift @@ -28,8 +28,8 @@ public struct ClusterSystemSettings { } public static var `default`: ClusterSystemSettings { - let defaultNode = Node(systemName: Default.name, host: Default.bindHost, port: Default.bindPort) - return ClusterSystemSettings(node: defaultNode) + let defaultEndpoint = Cluster.Endpoint(systemName: Default.name, host: Default.bindHost, port: Default.bindPort) + return ClusterSystemSettings(endpoint: defaultEndpoint) } public typealias ProtocolName = String @@ -70,44 +70,44 @@ public struct ClusterSystemSettings { /// Hostname used to accept incoming connections from other nodes. public var bindHost: String { set { - self.node.host = newValue + self.endpoint.host = newValue } get { - self.node.host + self.endpoint.host } } /// Port used to accept incoming connections from other nodes. public var bindPort: Int { set { - self.node.port = newValue + self.endpoint.port = newValue } get { - self.node.port + self.endpoint.port } } /// Node representing this node in the cluster. /// Note that most of the time `uniqueBindNode` is more appropriate, as it includes this node's unique id. 
- public var node: Node { + public var endpoint: Cluster.Endpoint { didSet { - self.serialization.localNode = self.uniqueBindNode - self.metrics.systemName = self.node.systemName - self.swim.metrics.systemName = self.node.systemName + self.serialization.localNode = self.bindNode + self.metrics.systemName = self.endpoint.systemName + self.swim.metrics.systemName = self.endpoint.systemName } } - /// `NodeID` to be used when exposing `UniqueNode` for node configured by using these settings. - public var nid: UniqueNodeID { + /// `Cluster.Node.ID` to be used when exposing `Cluster.Node` for node configured by using these settings. + public var nid: Cluster.Node.ID { didSet { - self.serialization.localNode = self.uniqueBindNode + self.serialization.localNode = self.bindNode } } /// Reflects the `bindAddress` however carries a uniquely assigned UID. /// The UID remains the same throughout updates of the `bindAddress` field. - public var uniqueBindNode: UniqueNode { - UniqueNode(node: self.node, nid: self.nid) + public var bindNode: Cluster.Node { + .init(endpoint: self.endpoint, nid: self.nid) } /// Time after which a the binding of the server port should fail. @@ -140,7 +140,7 @@ public struct ClusterSystemSettings { // ==== ------------------------------------------------------------------------------------------------------------ // MARK: Cluster protocol versioning - /// `ProtocolVersion` to be used when exposing `UniqueNode` for node configured by using these settings. + /// `ProtocolVersion` to be used when exposing `Cluster.Node` for node configured by using these settings. public var protocolVersion: ClusterSystem.Version { self._protocolVersion } @@ -252,22 +252,22 @@ public struct ClusterSystemSettings { public var threadPoolSize: Int = ProcessInfo.processInfo.activeProcessorCount public init(name: String, host: String = Default.bindHost, port: Int = Default.bindPort, tls: TLSConfiguration? = nil) { - self.init(node: Node(systemName: name, host: host, port: port), tls: tls) + self.init(endpoint: Cluster.Endpoint(systemName: name, host: host, port: port), tls: tls) } - public init(node: Node, tls: TLSConfiguration? = nil) { - self.node = node - self.nid = UniqueNodeID.random() + public init(endpoint: Cluster.Endpoint, tls: TLSConfiguration? = nil) { + self.endpoint = endpoint + self.nid = Cluster.Node.ID.random() self.tls = tls self.swim = SWIM.Settings() self.swim.unreachability = .enabled - if node.systemName != "" { - self.metrics.systemName = node.systemName - self.swim.metrics.systemName = node.systemName + if endpoint.systemName != "" { + self.metrics.systemName = endpoint.systemName + self.swim.metrics.systemName = endpoint.systemName } self.swim.metrics.labelPrefix = "cluster.swim" self.discovery = nil - self.serialization.localNode = self.uniqueBindNode + self.serialization.localNode = self.bindNode } } @@ -404,10 +404,10 @@ protocol ClusterSystemInstrumentationProvider { /// all the nodes of an existing cluster. public struct ServiceDiscoverySettings { let implementation: ServiceDiscoveryImplementation - private let _subscribe: (@escaping (Result<[Node], Error>) -> Void, @escaping (CompletionReason) -> Void) -> CancellationToken? + private let _subscribe: (@escaping (Result<[Cluster.Endpoint], Error>) -> Void, @escaping (CompletionReason) -> Void) -> CancellationToken? 
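A short sketch of the renamed settings surface shown above (`endpoint`, `nid`, and the computed `bindNode`); the concrete values are illustrative:

```swift
import DistributedCluster

var settings = ClusterSystemSettings(endpoint: Cluster.Endpoint(systemName: "Example", host: "127.0.0.1", port: 7337))
settings.endpoint.port = 8228                // was: settings.node.port
let bound: Cluster.Node = settings.bindNode  // was: settings.uniqueBindNode

// The unique ID stays stable across edits of the endpoint.
precondition(bound.nid == settings.nid)
```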
public init(_ implementation: Discovery, service: S) - where Discovery: ServiceDiscovery, Discovery.Instance == Node, + where Discovery: ServiceDiscovery, Discovery.Instance == Cluster.Endpoint, S == Discovery.Service { self.implementation = .dynamic(AnyServiceDiscovery(implementation)) @@ -416,18 +416,18 @@ public struct ServiceDiscoverySettings { } } - public init(_ implementation: Discovery, service: S, mapInstanceToNode transformer: @escaping (Discovery.Instance) throws -> Node) + public init(_ implementation: Discovery, service: S, mapInstanceToNode transformer: @escaping (Discovery.Instance) throws -> Cluster.Endpoint) where Discovery: ServiceDiscovery, S == Discovery.Service { - let mappedDiscovery: MapInstanceServiceDiscovery = implementation.mapInstance(transformer) + let mappedDiscovery: MapInstanceServiceDiscovery = implementation.mapInstance(transformer) self.implementation = .dynamic(AnyServiceDiscovery(mappedDiscovery)) self._subscribe = { onNext, onComplete in mappedDiscovery.subscribe(to: service, onNext: onNext, onComplete: onComplete) } } - public init(static nodes: Set) { + public init(static nodes: Set) { self.implementation = .static(nodes) self._subscribe = { onNext, _ in // Call onNext once and never again since the list of nodes doesn't change @@ -441,12 +441,12 @@ public struct ServiceDiscoverySettings { /// Similar to `ServiceDiscovery.subscribe` however it allows the handling of the listings to be generic and handled by the cluster system. /// This function is only intended for internal use by the `DiscoveryShell`. - func subscribe(onNext nextResultHandler: @escaping (Result<[Node], Error>) -> Void, onComplete completionHandler: @escaping (CompletionReason) -> Void) -> CancellationToken? { + func subscribe(onNext nextResultHandler: @escaping (Result<[Cluster.Endpoint], Error>) -> Void, onComplete completionHandler: @escaping (CompletionReason) -> Void) -> CancellationToken? { self._subscribe(nextResultHandler, completionHandler) } enum ServiceDiscoveryImplementation { - case `static`(Set) + case `static`(Set) case dynamic(AnyServiceDiscovery) } } diff --git a/Sources/DistributedCluster/DeadLetters.swift b/Sources/DistributedCluster/DeadLetters.swift index 227216b6c..282952ae6 100644 --- a/Sources/DistributedCluster/DeadLetters.swift +++ b/Sources/DistributedCluster/DeadLetters.swift @@ -62,7 +62,7 @@ extension ClusterSystem { public func personalDeadLetters(type: Message.Type = Message.self, recipient: ActorID) -> _ActorRef { // TODO: rather could we send messages to self._deadLetters with enough info so it handles properly? - guard recipient.uniqueNode == self.settings.uniqueBindNode else { + guard recipient.node == self.settings.bindNode else { /// While it should not realistically happen that a dead letter is obtained for a remote reference, /// we do allow for construction of such ref. It can be used to signify a ref is known to resolve to /// a known to be down cluster node. 
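For the static variant of `ServiceDiscoverySettings` above, a hedged configuration sketch; the element type of the `static` set is assumed to be `Set<Cluster.Endpoint>` after this rename (the generic parameters are not visible in this rendering of the patch), and the seed addresses are illustrative:

```swift
import DistributedCluster

func makeSeededSystem() async -> ClusterSystem {
    // Illustrative, fixed seed list of endpoints to contact on startup.
    let seedEndpoints: Set<Cluster.Endpoint> = [
        Cluster.Endpoint(systemName: "Example", host: "10.0.0.1", port: 7337),
        Cluster.Endpoint(systemName: "Example", host: "10.0.0.2", port: 7337),
    ]

    return await ClusterSystem("Example") { settings in
        // The discovery subsystem keeps trying to join every endpoint from the static list.
        settings.discovery = ServiceDiscoverySettings(static: seedEndpoints)
    }
}
```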
@@ -75,10 +75,10 @@ extension ClusterSystem { let localRecipient: ActorID if recipient.path.segments.first == ActorPath._dead.segments.first { // drop the node from the address; and we know the pointed at ref is already dead; do not prefix it again - localRecipient = ActorID(local: self.settings.uniqueBindNode, path: recipient.path, incarnation: recipient.incarnation) + localRecipient = ActorID(local: self.settings.bindNode, path: recipient.path, incarnation: recipient.incarnation) } else { // drop the node from the address; and prepend it as known-to-be-dead - localRecipient = ActorID(local: self.settings.uniqueBindNode, path: ActorPath._dead.appending(segments: recipient.segments), incarnation: recipient.incarnation) + localRecipient = ActorID(local: self.settings.bindNode, path: ActorPath._dead.appending(segments: recipient.segments), incarnation: recipient.incarnation) } return _ActorRef(.deadLetters(.init(self.log, id: localRecipient, system: self))).adapt(from: Message.self) } @@ -184,7 +184,7 @@ public final class DeadLetterOffice { let recipientString: String if let recipient = deadLetter.recipient { - let deadID: ActorID = .init(remote: recipient.uniqueNode, path: recipient.path, incarnation: recipient.incarnation) + let deadID: ActorID = .init(remote: recipient.node, path: recipient.path, incarnation: recipient.incarnation) // should not really happen, as the only way to get a remote ref is to resolve it, and a remote resolve always yields a remote ref // thus, it is impossible to resolve a remote address into a dead ref; however keeping this path in case we manually make such mistake diff --git a/Sources/DistributedCluster/Docs.docc/ClusterSingleton.md b/Sources/DistributedCluster/Docs.docc/ClusterSingleton.md index b55089cdd..9cd374306 100644 --- a/Sources/DistributedCluster/Docs.docc/ClusterSingleton.md +++ b/Sources/DistributedCluster/Docs.docc/ClusterSingleton.md @@ -133,11 +133,11 @@ let boss = try await system.singleton.host(name: "boss", settings: bossSingleton } actor CustomSingletonAllocationStrategy: ClusterSingletonAllocationStrategy { - func onClusterEvent(_ clusterEvent: Cluster.Event) async -> UniqueNode? { + func onClusterEvent(_ clusterEvent: Cluster.Event) async -> Cluster.Node? { fatalError() } - var node: UniqueNode? { + var node: Cluster.Node? 
{ get async { fatalError() } diff --git a/Sources/DistributedCluster/Docs.docc/Clustering.md b/Sources/DistributedCluster/Docs.docc/Clustering.md index df6b4cd00..43c096d2a 100644 --- a/Sources/DistributedCluster/Docs.docc/Clustering.md +++ b/Sources/DistributedCluster/Docs.docc/Clustering.md @@ -32,8 +32,8 @@ For more realistic uses, it is expected that you will configure your cluster sys struct Main { static func main() async throws { let system = await ClusterSystem("FirstSystem") { settings in - settings.node.host = "127.0.0.1" - settings.node.port = 7337 + settings.endpoint.host = "127.0.0.1" + settings.endpoint.port = 7337 } try await system.terminated @@ -64,18 +64,24 @@ In the simplest scenario we already know about some existing node that we can jo This is done using the system's ``ClusterControl`` object, like this: ```swift -system.cluster.join(node: Node(systemName: "JoiningExample", host: "127.0.0.1", port: 8228)) +system.cluster.join(endpoint: Cluster.Endpoint(systemName: "JoiningExample", host: "127.0.0.1", port: 8228)) ``` -> Note: The difference between a ``Node`` and ``UniqueNode`` is that a ``Node`` is "some node on that address", while -> an ``UniqueNode`` is a node that we have contacted and know its exact unique node identifier. Therefore, when reaching -> out to a node we know nothing about just yet, we use the `Node` type. +> Note: The difference between a ``Cluster/Endpoint`` and a ``Cluster/Node`` is that a ``Cluster/Endpoint`` only represents +> where we expect a cluster node to be listening for connections – effectively a `protocol`, `host` and `port` triplet. +> A ``Cluster/Node``, in contrast, is a specific, unique node that we have connected to, and whose exact unique node identifier we therefore know. +> +> Therefore, when reaching out to a node we know nothing about just yet, we use the ``Cluster/Endpoint`` type, +> and the ``Cluster/Node`` type in all following interactions. +> +> Furthermore, a ``Cluster/Member`` enriches a node with information about its cluster membership status +> (``Cluster/MemberStatus`` and ``Cluster/MemberReachability``). You can observe ``Cluster/Event``s emitted by `system.cluster.events` (``ClusterControl/events``) in order to see when a node has been successfully joined. There are also convenience APIs available on ``ClusterControl`` (`system.cluster`): -- ``ClusterControl/joined(node:within:)-2o4kd`` which allows you to suspend until a specific node becomes ``Cluster/MemberStatus/joining`` in the cluster membership, or -- ``ClusterControl/waitFor(_:_:within:)-126aq`` which allows you to suspend until a node reaches a specific ``Cluster/MemberStatus``. +- ``ClusterControl/joined(endpoint:within:)`` which allows you to suspend until a specific node becomes ``Cluster/MemberStatus/joining`` in the cluster membership, or +- ``ClusterControl/waitFor(_:_:within:)-1xiqo`` which allows you to suspend until a node reaches a specific ``Cluster/MemberStatus``. ### Automatic Node Discovery @@ -108,7 +114,7 @@ Generally, one should not need to rely on the low-level clustering events emitte Having that said, some actors (or other parts of your program) may be interested in the raw event stream offered by the cluster system. For example, one can implement a stability report by observing how frequently ``Cluster/ReachabilityChange`` events are emitted, or take it one level further and implement your own ``DowningStrategy`` based on observing those reachability changes.
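To make the "stability report" idea above concrete, a brief sketch of counting reachability changes; the exact case name `.reachabilityChange` and the `member` property on ``Cluster/ReachabilityChange`` are assumed here, so treat this as illustrative:

```swift
// A crude stability signal: count how often members flip reachability.
var reachabilityChanges = 0
for await event in system.cluster.events {
    if case .reachabilityChange(let change) = event {
        reachabilityChanges += 1
        print("Reachability change #\(reachabilityChanges): \(change.member)")
    }
}
```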
-Events emitted by the cluster, are always expressed in terms of cluster _members_ (``Cluster/Member``), which represent some concrete ``UniqueNode`` which is part of the membership. As soon as a node becomes part of the membership, even while it is only ``Cluster/MemberStatus/joining``, events about it will be emitted by the cluster. +Events emitted by the cluster, are always expressed in terms of cluster _members_ (``Cluster/Member``), which represent some concrete ``Cluster/Node`` which is part of the membership. As soon as a node becomes part of the membership, even while it is only ``Cluster/MemberStatus/joining``, events about it will be emitted by the cluster. A cluster member goes through the following phases in its lifecycle: @@ -159,7 +165,7 @@ var membership = Cluster.Membership.empty for await event in system.cluster.events { if case .membershipChanged(let change) = event { - guard change.node == system.cluster.uniqueNode else { + guard change.node == system.cluster.node else { continue } guard change.isUp else { diff --git a/Sources/DistributedCluster/Docs.docc/Security.md b/Sources/DistributedCluster/Docs.docc/Security.md index 083bc5e3c..cfa26d65d 100644 --- a/Sources/DistributedCluster/Docs.docc/Security.md +++ b/Sources/DistributedCluster/Docs.docc/Security.md @@ -38,7 +38,7 @@ let testCertificateSource1: NIOSSLCertificateSource = .certificate(testCertifica let testKeySource1: NIOSSLPrivateKeySource = .privateKey(try NIOSSLPrivateKey(bytes: [UInt8](testKey1.utf8), format: .pem)) let tlsExampleSystem = await ClusterSystem("tls-example") { settings in - settings.node.host = "..." + settings.endpoint.host = "..." settings.tls = TLSConfiguration.makeServerConfiguration( certificateChain: [testCertificateSource1], privateKey: testKeySource1 diff --git a/Sources/DistributedCluster/Gossip/Gossiper+Shell.swift b/Sources/DistributedCluster/Gossip/Gossiper+Shell.swift index 74ea93733..5ea39bef0 100644 --- a/Sources/DistributedCluster/Gossip/Gossiper+Shell.swift +++ b/Sources/DistributedCluster/Gossip/Gossiper+Shell.swift @@ -289,7 +289,7 @@ extension GossipShell { case .onClusterMember(let atLeastStatus, let resolvePeerOn): func resolveInsertPeer(_ context: _ActorContext, member: Cluster.Member) { - guard member.uniqueNode != context.system.cluster.uniqueNode else { + guard member.node != context.system.cluster.node else { return // ignore self node } diff --git a/Sources/DistributedCluster/LifecycleMonitoring/LifecycleWatchContainer.swift b/Sources/DistributedCluster/LifecycleMonitoring/LifecycleWatchContainer.swift index 405e65be4..569ffecf1 100644 --- a/Sources/DistributedCluster/LifecycleMonitoring/LifecycleWatchContainer.swift +++ b/Sources/DistributedCluster/LifecycleMonitoring/LifecycleWatchContainer.swift @@ -211,10 +211,10 @@ extension LifecycleWatchContainer { /// /// Does NOT immediately handle these `Terminated` signals, they are treated as any other normal signal would, /// such that the user can have a chance to handle and react to them. 
- private func receiveNodeTerminated(_ terminatedNode: UniqueNode) { + private func receiveNodeTerminated(_ terminatedNode: Cluster.Node) { // TODO: remove actors as we notify about them for (watched, _) in self.watching { - guard watched.uniqueNode == terminatedNode else { + guard watched.node == terminatedNode else { continue } @@ -246,7 +246,7 @@ extension LifecycleWatchContainer { } // ==== ------------------------------------------------------------------------------------------------------------ - // MARK: Node termination + // MARK: Cluster.Node termination private func subscribeNodeTerminatedEvents( watchedID: ActorID, @@ -254,15 +254,15 @@ extension LifecycleWatchContainer { ) { self.nodeDeathWatcher?.tell( // different actor .remoteDistributedActorWatched( - remoteNode: watchedID.uniqueNode, + remoteNode: watchedID.node, watcherID: self.watcherID, - nodeTerminated: { [weak self, weak system] uniqueNode in + nodeTerminated: { [weak self, weak system] node in guard let self else { return } Task { - self.receiveNodeTerminated(uniqueNode) + self.receiveNodeTerminated(node) } guard let system = system else { @@ -270,7 +270,7 @@ extension LifecycleWatchContainer { } let myselfRef = system._resolveUntyped(context: .init(id: self.watcherID, system: system)) - myselfRef._sendSystemMessage(.nodeTerminated(uniqueNode), file: file, line: line) + myselfRef._sendSystemMessage(.nodeTerminated(node), file: file, line: line) } ) ) diff --git a/Sources/DistributedCluster/LifecycleMonitoring/_BehaviorDeathWatch.swift b/Sources/DistributedCluster/LifecycleMonitoring/_BehaviorDeathWatch.swift index 8bd37d5c9..50d5f0c70 100644 --- a/Sources/DistributedCluster/LifecycleMonitoring/_BehaviorDeathWatch.swift +++ b/Sources/DistributedCluster/LifecycleMonitoring/_BehaviorDeathWatch.swift @@ -273,8 +273,8 @@ internal struct DeathWatchImpl { /// /// Does NOT immediately handle these `Terminated` signals, they are treated as any other normal signal would, /// such that the user can have a chance to handle and react to them. 
- public mutating func receiveNodeTerminated(_ terminatedNode: UniqueNode, myself: _ReceivesSystemMessages) { - for watched: _AddressableActorRef in self.watching.keys where watched.id.uniqueNode == terminatedNode { + public mutating func receiveNodeTerminated(_ terminatedNode: Cluster.Node, myself: _ReceivesSystemMessages) { + for watched: _AddressableActorRef in self.watching.keys where watched.id.node == terminatedNode { // we KNOW an actor existed if it is local and not resolved as /dead; otherwise it may have existed // for a remote ref we don't know for sure if it existed let existenceConfirmed = watched.refType.isLocal && !watched.id.path.starts(with: ._dead) @@ -296,13 +296,13 @@ internal struct DeathWatchImpl { } // ==== ------------------------------------------------------------------------------------------------------------ - // MARK: Node termination + // MARK: Cluster.Node termination private func subscribeNodeTerminatedEvents(myself: _ActorRef, watchedID: ActorID, file: String = #filePath, line: UInt = #line) { guard watchedID._isRemote else { return } - self.nodeDeathWatcher.tell(.remoteActorWatched(watcher: _AddressableActorRef(myself), remoteNode: watchedID.uniqueNode), file: file, line: line) + self.nodeDeathWatcher.tell(.remoteActorWatched(watcher: _AddressableActorRef(myself), remoteNode: watchedID.node), file: file, line: line) } } diff --git a/Sources/DistributedCluster/Plugins/ClusterSingleton/ClusterSingletonAllocationStrategy.swift b/Sources/DistributedCluster/Plugins/ClusterSingleton/ClusterSingletonAllocationStrategy.swift index 67579a2cf..a1186a811 100644 --- a/Sources/DistributedCluster/Plugins/ClusterSingleton/ClusterSingletonAllocationStrategy.swift +++ b/Sources/DistributedCluster/Plugins/ClusterSingleton/ClusterSingletonAllocationStrategy.swift @@ -15,15 +15,15 @@ // ==== ---------------------------------------------------------------------------------------------------------------- // MARK: Protocol for singleton allocation strategy -/// Strategy for choosing a `UniqueNode` to allocate singleton. +/// Strategy for choosing a `Cluster.Node` to allocate singleton. public protocol ClusterSingletonAllocationStrategy { /// Receives and handles the `Cluster.Event`. /// /// - Returns: The current `node` after processing `clusterEvent`. - func onClusterEvent(_ clusterEvent: Cluster.Event) async -> UniqueNode? + func onClusterEvent(_ clusterEvent: Cluster.Event) async -> Cluster.Node? /// The currently allocated `node` for the singleton. - var node: UniqueNode? { get async } + var node: Cluster.Node? { get async } } // ==== ---------------------------------------------------------------------------------------------------------------- @@ -31,25 +31,25 @@ public protocol ClusterSingletonAllocationStrategy { /// An `AllocationStrategy` in which selection is based on cluster leadership. public final class ClusterSingletonAllocationByLeadership: ClusterSingletonAllocationStrategy { - var _node: UniqueNode? + var _node: Cluster.Node? public init(settings: ClusterSingletonSettings, actorSystem: ClusterSystem) { // not used... } - public func onClusterEvent(_ clusterEvent: Cluster.Event) async -> UniqueNode? { + public func onClusterEvent(_ clusterEvent: Cluster.Event) async -> Cluster.Node? 
{ switch clusterEvent { case .leadershipChange(let change): - self._node = change.newLeader?.uniqueNode + self._node = change.newLeader?.node case .snapshot(let membership): - self._node = membership.leader?.uniqueNode + self._node = membership.leader?.node default: () // ignore other events } return self._node } - public var node: UniqueNode? { + public var node: Cluster.Node? { get async { self._node } diff --git a/Sources/DistributedCluster/Plugins/ClusterSingleton/ClusterSingletonBoss.swift b/Sources/DistributedCluster/Plugins/ClusterSingleton/ClusterSingletonBoss.swift index 59b8cd3d0..b7c46c1c8 100644 --- a/Sources/DistributedCluster/Plugins/ClusterSingleton/ClusterSingletonBoss.swift +++ b/Sources/DistributedCluster/Plugins/ClusterSingleton/ClusterSingletonBoss.swift @@ -53,9 +53,9 @@ internal distributed actor ClusterSingletonBoss: ClusterS let singletonFactory: ((ClusterSystem) async throws -> Act)? /// The node that the singleton runs on. - private var targetNode: UniqueNode? - private var selfNode: UniqueNode { - self.actorSystem.cluster.uniqueNode + private var targetNode: Cluster.Node? + private var selfNode: Cluster.Node { + self.actorSystem.cluster.node } /// The target singleton instance we should forward invocations to. @@ -116,7 +116,7 @@ internal distributed actor ClusterSingletonBoss: ClusterS try await self.updateTargetNode(node: node) } - private func updateTargetNode(node: UniqueNode?) async throws { + private func updateTargetNode(node: Cluster.Node?) async throws { guard self.targetNode != node else { self.log.trace("Skip updating target node. New node is already the same as current targetNode.", metadata: self.metadata()) return @@ -138,7 +138,7 @@ internal distributed actor ClusterSingletonBoss: ClusterS } } - private func takeOver(from: UniqueNode?) async throws { + private func takeOver(from: Cluster.Node?) async throws { guard let singletonFactory = self.singletonFactory else { preconditionFailure("Cluster singleton [\(self.settings.name)] cannot run on this node. Please review ClusterSingletonAllocationStrategySettings and/or cluster singleton usage.") } @@ -155,7 +155,7 @@ internal distributed actor ClusterSingletonBoss: ClusterS self.updateSingleton(singleton) } - internal func handOver(to: UniqueNode?) { + internal func handOver(to: Cluster.Node?) { self.log.debug("Hand over singleton [\(self.settings.name)] to [\(String(describing: to))]", metadata: self.metadata()) guard let instance = self.targetSingleton else { @@ -181,11 +181,11 @@ internal distributed actor ClusterSingletonBoss: ClusterS } } - private func updateSingleton(node: UniqueNode?) throws { + private func updateSingleton(node: Cluster.Node?) 
throws { switch node { - case .some(let node) where node == self.actorSystem.cluster.uniqueNode: + case .some(let node) where node == self.actorSystem.cluster.node: // This must have been a result of an activate() and the singleton must be stored locally - precondition(self.targetSingleton?.id.uniqueNode == self.actorSystem.cluster.uniqueNode) + precondition(self.targetSingleton?.id.node == self.actorSystem.cluster.node) return case .some(let otherNode): var targetSingletonID = ActorID(remote: otherNode, type: Act.self, incarnation: .wellKnown) @@ -215,7 +215,7 @@ internal distributed actor ClusterSingletonBoss: ClusterS self.allocationTimeoutTask = nil if !buffer.isEmpty { - self.log.debug("Flushing \(buffer.count) remote calls to [\(Act.self)] on [\(singleton.id.uniqueNode)]", metadata: self.metadata()) + self.log.debug("Flushing \(buffer.count) remote calls to [\(Act.self)] on [\(singleton.id.node)]", metadata: self.metadata()) while let (callID, continuation) = self.buffer.take() { // FIXME: the callIDs are not used in the actual call making but could be for better consistency continuation.resume(returning: singleton) } @@ -421,9 +421,9 @@ extension ClusterSingletonBoss { let singleton = self.targetSingleton { assert( - singleton.id.uniqueNode == selfNode, + singleton.id.node == selfNode, "Target singleton node and targetNode were not the same! TargetNode: \(targetNode)," + - " singleton.id.uniqueNode: \(singleton.id.uniqueNode)" + " singleton.id.node: \(singleton.id.node)" ) return try await singleton.actorSystem.localCall( on: singleton, @@ -465,9 +465,9 @@ extension ClusterSingletonBoss { self.log.trace("ENTER forwardOrStashRemoteCallVoid \(target) -> DIRECT LOCAL CALL") assert( - singleton.id.uniqueNode == selfNode, + singleton.id.node == selfNode, "Target singleton node and targetNode were not the same! TargetNode: \(targetNode)," + - " singleton.id.uniqueNode: \(singleton.id.uniqueNode)" + " singleton.id.node: \(singleton.id.node)" ) return try await singleton.actorSystem.localCallVoid( on: singleton, diff --git a/Sources/DistributedCluster/Protobuf/ActorID+Serialization.swift b/Sources/DistributedCluster/Protobuf/ActorID+Serialization.swift index 73b6e5948..56b46c28b 100644 --- a/Sources/DistributedCluster/Protobuf/ActorID+Serialization.swift +++ b/Sources/DistributedCluster/Protobuf/ActorID+Serialization.swift @@ -22,7 +22,7 @@ extension ActorID: Codable { metadataSettings?.encodeCustomMetadata ?? 
({ _, _ in () }) var container = encoder.container(keyedBy: ActorCoding.CodingKeys.self) - try container.encode(self.uniqueNode, forKey: ActorCoding.CodingKeys.node) + try container.encode(self.node, forKey: ActorCoding.CodingKeys.node) try container.encode(self.path, forKey: ActorCoding.CodingKeys.path) // TODO: remove as we remove the tree try container.encode(self.incarnation, forKey: ActorCoding.CodingKeys.incarnation) @@ -57,7 +57,7 @@ extension ActorID: Codable { public init(from decoder: Decoder) throws { let container = try decoder.container(keyedBy: ActorCoding.CodingKeys.self) - let node = try container.decode(UniqueNode.self, forKey: ActorCoding.CodingKeys.node) + let node = try container.decode(Cluster.Node.self, forKey: ActorCoding.CodingKeys.node) let path = try container.decodeIfPresent(ActorPath.self, forKey: ActorCoding.CodingKeys.path) let incarnation = try container.decode(UInt32.self, forKey: ActorCoding.CodingKeys.incarnation) @@ -108,7 +108,7 @@ extension ActorID: _ProtobufRepresentable { let encodeCustomMetadata = metadataSettings.encodeCustomMetadata var proto = _ProtoActorID() - let node = self.uniqueNode + let node = self.node proto.node = try node.toProto(context: context) proto.path.segments = self.segments.map(\.value) @@ -147,11 +147,11 @@ extension ActorID: _ProtobufRepresentable { } public init(fromProto proto: _ProtoActorID, context: Serialization.Context) throws { - let uniqueNode: UniqueNode = try .init(fromProto: proto.node, context: context) + let node: Cluster.Node = try .init(fromProto: proto.node, context: context) let path = try ActorPath(proto.path.segments.map { try ActorPathSegment($0) }) - self.init(remote: uniqueNode, path: path, incarnation: ActorIncarnation(proto.incarnation)) + self.init(remote: node, path: path, incarnation: ActorIncarnation(proto.incarnation)) // Handle well known metadata if !proto.metadata.isEmpty { @@ -185,29 +185,29 @@ extension ActorID: _ProtobufRepresentable { } } -extension UniqueNode: _ProtobufRepresentable { - public typealias ProtobufRepresentation = _ProtoUniqueNode +extension Cluster.Node: _ProtobufRepresentable { + public typealias ProtobufRepresentation = _ProtoClusterNode - public func toProto(context: Serialization.Context) throws -> _ProtoUniqueNode { - var proto = _ProtoUniqueNode() + public func toProto(context: Serialization.Context) throws -> _ProtoClusterNode { + var proto = _ProtoClusterNode() proto.nid = self.nid.value - proto.node.protocol = self.node.protocol - proto.node.system = self.node.systemName - proto.node.hostname = self.node.host - proto.node.port = UInt32(self.node.port) + proto.endpoint.protocol = self.endpoint.protocol + proto.endpoint.system = self.endpoint.systemName + proto.endpoint.hostname = self.endpoint.host + proto.endpoint.port = UInt32(self.endpoint.port) return proto } - public init(fromProto proto: _ProtoUniqueNode, context: Serialization.Context) throws { - let node = Node( - protocol: proto.node.protocol, - systemName: proto.node.system, - host: proto.node.hostname, - port: Int(proto.node.port) + public init(fromProto proto: _ProtoClusterNode, context: Serialization.Context) throws { + let endpoint = Cluster.Endpoint( + protocol: proto.endpoint.protocol, + systemName: proto.endpoint.system, + host: proto.endpoint.hostname, + port: Int(proto.endpoint.port) ) - self = .init(node: node, nid: UniqueNodeID(proto.nid)) + self = .init(endpoint: endpoint, nid: Cluster.Node.ID(proto.nid)) } } @@ -243,34 +243,34 @@ extension _ProtoActorPath { } // ==== 
---------------------------------------------------------------------------------------------------------------- -// MARK: _ProtoUniqueNode +// MARK: _ProtoClusterNode -extension UniqueNode { - init(_ proto: _ProtoUniqueNode) throws { - guard proto.hasNode else { - throw SerializationError(.missingField("address", type: String(describing: UniqueNode.self))) +extension Cluster.Node { + init(_ proto: _ProtoClusterNode) throws { + guard proto.hasEndpoint else { + throw SerializationError(.missingField("endpoint", type: String(describing: Cluster.Node.self))) } guard proto.nid != 0 else { - throw SerializationError(.missingField("uid", type: String(describing: UniqueNode.self))) + throw SerializationError(.missingField("uid", type: String(describing: Cluster.Node.self))) } - let node = Node(proto.node) - let nid = UniqueNodeID(proto.nid) - self.init(node: node, nid: nid) + let endpoint = Cluster.Endpoint(proto.endpoint) + let nid = Cluster.Node.ID(proto.nid) + self.init(endpoint: endpoint, nid: nid) } } -extension _ProtoUniqueNode { - init(_ node: UniqueNode) { - self.node = _ProtoNode(node.node) +extension _ProtoClusterNode { + init(_ node: Cluster.Node) { + self.endpoint = _ProtoClusterEndpoint(node.endpoint) self.nid = node.nid.value } } // ==== ---------------------------------------------------------------------------------------------------------------- -// MARK: _ProtoNode +// MARK: _ProtoClusterEndpoint -extension Node { - init(_ proto: _ProtoNode) { +extension Cluster.Endpoint { + init(_ proto: _ProtoClusterEndpoint) { self.protocol = proto.protocol self.systemName = proto.system self.host = proto.hostname @@ -278,11 +278,11 @@ extension Node { } } -extension _ProtoNode { - init(_ node: Node) { - self.protocol = node.protocol - self.system = node.systemName - self.hostname = node.host - self.port = UInt32(node.port) +extension _ProtoClusterEndpoint { + init(_ endpoint: Cluster.Endpoint) { + self.protocol = endpoint.protocol + self.system = endpoint.systemName + self.hostname = endpoint.host + self.port = UInt32(endpoint.port) } } diff --git a/Sources/DistributedCluster/Protobuf/ActorID.pb.swift b/Sources/DistributedCluster/Protobuf/ActorID.pb.swift index 26ff29742..b663421da 100644 --- a/Sources/DistributedCluster/Protobuf/ActorID.pb.swift +++ b/Sources/DistributedCluster/Protobuf/ActorID.pb.swift @@ -1,5 +1,4 @@ // DO NOT EDIT. -// swift-format-ignore-file // // Generated by the Swift generator plugin for the protocol buffer compiler. // Source: ActorID.proto @@ -27,7 +26,7 @@ import SwiftProtobuf // If the compiler emits an error on this type, it is because this file // was generated by a version of the `protoc` Swift plug-in that is // incompatible with the version of SwiftProtobuf to which you are linking. -// Please ensure that you are building against the same version of the API +// Please ensure that your are building against the same version of the API // that was used to generate this file. fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} @@ -39,34 +38,39 @@ public struct _ProtoActorID { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - public var node: _ProtoUniqueNode { - get {return _node ?? _ProtoUniqueNode()} - set {_node = newValue} + public var node: _ProtoClusterNode { + get {return _storage._node ?? 
_ProtoClusterNode()} + set {_uniqueStorage()._node = newValue} } /// Returns true if `node` has been explicitly set. - public var hasNode: Bool {return self._node != nil} + public var hasNode: Bool {return _storage._node != nil} /// Clears the value of `node`. Subsequent reads from it will return its default value. - public mutating func clearNode() {self._node = nil} + public mutating func clearNode() {_uniqueStorage()._node = nil} public var path: _ProtoActorPath { - get {return _path ?? _ProtoActorPath()} - set {_path = newValue} + get {return _storage._path ?? _ProtoActorPath()} + set {_uniqueStorage()._path = newValue} } /// Returns true if `path` has been explicitly set. - public var hasPath: Bool {return self._path != nil} + public var hasPath: Bool {return _storage._path != nil} /// Clears the value of `path`. Subsequent reads from it will return its default value. - public mutating func clearPath() {self._path = nil} + public mutating func clearPath() {_uniqueStorage()._path = nil} - public var incarnation: UInt32 = 0 + public var incarnation: UInt32 { + get {return _storage._incarnation} + set {_uniqueStorage()._incarnation = newValue} + } - public var metadata: Dictionary = [:] + public var metadata: Dictionary { + get {return _storage._metadata} + set {_uniqueStorage()._metadata = newValue} + } public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _node: _ProtoUniqueNode? = nil - fileprivate var _path: _ProtoActorPath? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoActorPath { @@ -81,30 +85,33 @@ public struct _ProtoActorPath { public init() {} } -public struct _ProtoUniqueNode { +public struct _ProtoClusterNode { // SwiftProtobuf.Message conformance is added in an extension below. See the // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - public var node: _ProtoNode { - get {return _node ?? _ProtoNode()} - set {_node = newValue} + public var endpoint: _ProtoClusterEndpoint { + get {return _storage._endpoint ?? _ProtoClusterEndpoint()} + set {_uniqueStorage()._endpoint = newValue} + } + /// Returns true if `endpoint` has been explicitly set. + public var hasEndpoint: Bool {return _storage._endpoint != nil} + /// Clears the value of `endpoint`. Subsequent reads from it will return its default value. + public mutating func clearEndpoint() {_uniqueStorage()._endpoint = nil} + + public var nid: UInt64 { + get {return _storage._nid} + set {_uniqueStorage()._nid = newValue} } - /// Returns true if `node` has been explicitly set. - public var hasNode: Bool {return self._node != nil} - /// Clears the value of `node`. Subsequent reads from it will return its default value. - public mutating func clearNode() {self._node = nil} - - public var nid: UInt64 = 0 public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _node: _ProtoNode? = nil + fileprivate var _storage = _StorageClass.defaultInstance } -public struct _ProtoNode { +public struct _ProtoClusterEndpoint { // SwiftProtobuf.Message conformance is added in an extension below. See the // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. @@ -133,42 +140,77 @@ extension _ProtoActorID: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementa 4: .same(proto: "metadata"), ] + fileprivate class _StorageClass { + var _node: _ProtoClusterNode? = nil + var _path: _ProtoActorPath? 
= nil + var _incarnation: UInt32 = 0 + var _metadata: Dictionary = [:] + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _node = source._node + _path = source._path + _incarnation = source._incarnation + _metadata = source._metadata + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._node) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._path) }() - case 3: try { try decoder.decodeSingularUInt32Field(value: &self.incarnation) }() - case 4: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: &self.metadata) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._node) + case 2: try decoder.decodeSingularMessageField(value: &_storage._path) + case 3: try decoder.decodeSingularUInt32Field(value: &_storage._incarnation) + case 4: try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: &_storage._metadata) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._node { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if let v = self._path { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - } - if self.incarnation != 0 { - try visitor.visitSingularUInt32Field(value: self.incarnation, fieldNumber: 3) - } - if !self.metadata.isEmpty { - try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: self.metadata, fieldNumber: 4) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._node { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if let v = _storage._path { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } + if _storage._incarnation != 0 { + try visitor.visitSingularUInt32Field(value: _storage._incarnation, fieldNumber: 3) + } + if !_storage._metadata.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: _storage._metadata, fieldNumber: 4) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoActorID, rhs: _ProtoActorID) -> Bool { - if lhs._node != rhs._node {return false} - if lhs._path != rhs._path {return false} - if lhs.incarnation != rhs.incarnation {return false} - if lhs.metadata != rhs.metadata {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._node != rhs_storage._node {return false} + if _storage._path != rhs_storage._path {return false} + if _storage._incarnation != rhs_storage._incarnation {return false} + if _storage._metadata != 
rhs_storage._metadata {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -182,11 +224,8 @@ extension _ProtoActorPath: SwiftProtobuf.Message, SwiftProtobuf._MessageImplemen public mutating func decodeMessage(decoder: inout D) throws { while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeRepeatedStringField(value: &self.segments) }() + case 1: try decoder.decodeRepeatedStringField(value: &self.segments) default: break } } @@ -206,46 +245,77 @@ extension _ProtoActorPath: SwiftProtobuf.Message, SwiftProtobuf._MessageImplemen } } -extension _ProtoUniqueNode: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - public static let protoMessageName: String = "UniqueNode" +extension _ProtoClusterNode: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "ClusterNode" public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ - 1: .same(proto: "node"), + 1: .same(proto: "endpoint"), 2: .same(proto: "nid"), ] + fileprivate class _StorageClass { + var _endpoint: _ProtoClusterEndpoint? = nil + var _nid: UInt64 = 0 + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _endpoint = source._endpoint + _nid = source._nid + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._node) }() - case 2: try { try decoder.decodeSingularUInt64Field(value: &self.nid) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._endpoint) + case 2: try decoder.decodeSingularUInt64Field(value: &_storage._nid) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._node { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if self.nid != 0 { - try visitor.visitSingularUInt64Field(value: self.nid, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._endpoint { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if _storage._nid != 0 { + try visitor.visitSingularUInt64Field(value: _storage._nid, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } - public static func ==(lhs: _ProtoUniqueNode, rhs: _ProtoUniqueNode) -> Bool { - if lhs._node != rhs._node {return false} - if lhs.nid != rhs.nid {return false} + public static func ==(lhs: _ProtoClusterNode, rhs: _ProtoClusterNode) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._endpoint != rhs_storage._endpoint {return false} + if _storage._nid != rhs_storage._nid {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } } -extension _ProtoNode: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { - public static let protoMessageName: String = "Node" +extension _ProtoClusterEndpoint: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + public static let protoMessageName: String = "ClusterEndpoint" public static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "protocol"), 2: .same(proto: "system"), @@ -255,14 +325,11 @@ extension _ProtoNode: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementatio public mutating func decodeMessage(decoder: inout D) throws { while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeSingularStringField(value: &self.`protocol`) }() - case 2: try { try decoder.decodeSingularStringField(value: &self.system) }() - case 3: try { try decoder.decodeSingularStringField(value: &self.hostname) }() - case 4: try { try decoder.decodeSingularUInt32Field(value: &self.port) }() + case 1: try decoder.decodeSingularStringField(value: &self.`protocol`) + case 2: try decoder.decodeSingularStringField(value: &self.system) + case 3: try decoder.decodeSingularStringField(value: &self.hostname) + case 4: try decoder.decodeSingularUInt32Field(value: &self.port) default: break } } @@ -284,7 +351,7 @@ extension _ProtoNode: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementatio try unknownFields.traverse(visitor: &visitor) } - public static func ==(lhs: _ProtoNode, rhs: _ProtoNode) -> Bool { + public static func ==(lhs: _ProtoClusterEndpoint, rhs: _ProtoClusterEndpoint) -> Bool { if lhs.`protocol` != rhs.`protocol` {return false} if lhs.system != rhs.system {return false} if lhs.hostname != rhs.hostname {return false} diff --git a/Sources/DistributedCluster/Protobuf/SystemMessages.pb.swift b/Sources/DistributedCluster/Protobuf/SystemMessages.pb.swift index ee698ccf3..a83602d74 100644 --- a/Sources/DistributedCluster/Protobuf/SystemMessages.pb.swift +++ b/Sources/DistributedCluster/Protobuf/SystemMessages.pb.swift @@ -1,5 +1,4 @@ // DO NOT EDIT. -// swift-format-ignore-file // // Generated by the Swift generator plugin for the protocol buffer compiler. // Source: SystemMessages.proto @@ -27,7 +26,7 @@ import SwiftProtobuf // If the compiler emits an error on this type, it is because this file // was generated by a version of the `protoc` Swift plug-in that is // incompatible with the version of SwiftProtobuf to which you are linking. -// Please ensure that you are building against the same version of the API +// Please ensure that your are building against the same version of the API // that was used to generate this file. fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} @@ -39,30 +38,33 @@ public struct _ProtoSystemMessage { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - public var payload: _ProtoSystemMessage.OneOf_Payload? = nil + public var payload: OneOf_Payload? { + get {return _storage._payload} + set {_uniqueStorage()._payload = newValue} + } public var watch: _ProtoSystemMessage_Watch { get { - if case .watch(let v)? = payload {return v} + if case .watch(let v)? = _storage._payload {return v} return _ProtoSystemMessage_Watch() } - set {payload = .watch(newValue)} + set {_uniqueStorage()._payload = .watch(newValue)} } public var unwatch: _ProtoSystemMessage_Unwatch { get { - if case .unwatch(let v)? = payload {return v} + if case .unwatch(let v)? = _storage._payload {return v} return _ProtoSystemMessage_Unwatch() } - set {payload = .unwatch(newValue)} + set {_uniqueStorage()._payload = .unwatch(newValue)} } public var terminated: _ProtoSystemMessage_Terminated { get { - if case .terminated(let v)? = payload {return v} + if case .terminated(let v)? 
= _storage._payload {return v} return _ProtoSystemMessage_Terminated() } - set {payload = .terminated(newValue)} + set {_uniqueStorage()._payload = .terminated(newValue)} } public var unknownFields = SwiftProtobuf.UnknownStorage() @@ -74,22 +76,10 @@ public struct _ProtoSystemMessage { #if !swift(>=4.1) public static func ==(lhs: _ProtoSystemMessage.OneOf_Payload, rhs: _ProtoSystemMessage.OneOf_Payload) -> Bool { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch (lhs, rhs) { - case (.watch, .watch): return { - guard case .watch(let l) = lhs, case .watch(let r) = rhs else { preconditionFailure() } - return l == r - }() - case (.unwatch, .unwatch): return { - guard case .unwatch(let l) = lhs, case .unwatch(let r) = rhs else { preconditionFailure() } - return l == r - }() - case (.terminated, .terminated): return { - guard case .terminated(let l) = lhs, case .terminated(let r) = rhs else { preconditionFailure() } - return l == r - }() + case (.watch(let l), .watch(let r)): return l == r + case (.unwatch(let l), .unwatch(let r)): return l == r + case (.terminated(let l), .terminated(let r)): return l == r default: return false } } @@ -97,6 +87,8 @@ public struct _ProtoSystemMessage { } public init() {} + + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoSystemMessage_Watch { @@ -105,29 +97,28 @@ public struct _ProtoSystemMessage_Watch { // methods supported on all messages. public var watchee: _ProtoActorID { - get {return _watchee ?? _ProtoActorID()} - set {_watchee = newValue} + get {return _storage._watchee ?? _ProtoActorID()} + set {_uniqueStorage()._watchee = newValue} } /// Returns true if `watchee` has been explicitly set. - public var hasWatchee: Bool {return self._watchee != nil} + public var hasWatchee: Bool {return _storage._watchee != nil} /// Clears the value of `watchee`. Subsequent reads from it will return its default value. - public mutating func clearWatchee() {self._watchee = nil} + public mutating func clearWatchee() {_uniqueStorage()._watchee = nil} public var watcher: _ProtoActorID { - get {return _watcher ?? _ProtoActorID()} - set {_watcher = newValue} + get {return _storage._watcher ?? _ProtoActorID()} + set {_uniqueStorage()._watcher = newValue} } /// Returns true if `watcher` has been explicitly set. - public var hasWatcher: Bool {return self._watcher != nil} + public var hasWatcher: Bool {return _storage._watcher != nil} /// Clears the value of `watcher`. Subsequent reads from it will return its default value. - public mutating func clearWatcher() {self._watcher = nil} + public mutating func clearWatcher() {_uniqueStorage()._watcher = nil} public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _watchee: _ProtoActorID? = nil - fileprivate var _watcher: _ProtoActorID? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoSystemMessage_Unwatch { @@ -136,29 +127,28 @@ public struct _ProtoSystemMessage_Unwatch { // methods supported on all messages. public var watchee: _ProtoActorID { - get {return _watchee ?? _ProtoActorID()} - set {_watchee = newValue} + get {return _storage._watchee ?? _ProtoActorID()} + set {_uniqueStorage()._watchee = newValue} } /// Returns true if `watchee` has been explicitly set. 
- public var hasWatchee: Bool {return self._watchee != nil} + public var hasWatchee: Bool {return _storage._watchee != nil} /// Clears the value of `watchee`. Subsequent reads from it will return its default value. - public mutating func clearWatchee() {self._watchee = nil} + public mutating func clearWatchee() {_uniqueStorage()._watchee = nil} public var watcher: _ProtoActorID { - get {return _watcher ?? _ProtoActorID()} - set {_watcher = newValue} + get {return _storage._watcher ?? _ProtoActorID()} + set {_uniqueStorage()._watcher = newValue} } /// Returns true if `watcher` has been explicitly set. - public var hasWatcher: Bool {return self._watcher != nil} + public var hasWatcher: Bool {return _storage._watcher != nil} /// Clears the value of `watcher`. Subsequent reads from it will return its default value. - public mutating func clearWatcher() {self._watcher = nil} + public mutating func clearWatcher() {_uniqueStorage()._watcher = nil} public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _watchee: _ProtoActorID? = nil - fileprivate var _watcher: _ProtoActorID? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoSystemMessage_Terminated { @@ -167,23 +157,29 @@ public struct _ProtoSystemMessage_Terminated { // methods supported on all messages. public var ref: _ProtoActorID { - get {return _ref ?? _ProtoActorID()} - set {_ref = newValue} + get {return _storage._ref ?? _ProtoActorID()} + set {_uniqueStorage()._ref = newValue} } /// Returns true if `ref` has been explicitly set. - public var hasRef: Bool {return self._ref != nil} + public var hasRef: Bool {return _storage._ref != nil} /// Clears the value of `ref`. Subsequent reads from it will return its default value. - public mutating func clearRef() {self._ref = nil} + public mutating func clearRef() {_uniqueStorage()._ref = nil} - public var existenceConfirmed: Bool = false + public var existenceConfirmed: Bool { + get {return _storage._existenceConfirmed} + set {_uniqueStorage()._existenceConfirmed = newValue} + } - public var idTerminated: Bool = false + public var idTerminated: Bool { + get {return _storage._idTerminated} + set {_uniqueStorage()._idTerminated = newValue} + } public var unknownFields = SwiftProtobuf.UnknownStorage() public init() {} - fileprivate var _ref: _ProtoActorID? = nil + fileprivate var _storage = _StorageClass.defaultInstance } public struct _ProtoSystemMessageACK { @@ -247,80 +243,85 @@ extension _ProtoSystemMessage: SwiftProtobuf.Message, SwiftProtobuf._MessageImpl 3: .same(proto: "terminated"), ] + fileprivate class _StorageClass { + var _payload: _ProtoSystemMessage.OneOf_Payload? + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _payload = source._payload + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { - var v: _ProtoSystemMessage_Watch? 
- var hadOneofValue = false - if let current = self.payload { - hadOneofValue = true - if case .watch(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.payload = .watch(v) - } - }() - case 2: try { - var v: _ProtoSystemMessage_Unwatch? - var hadOneofValue = false - if let current = self.payload { - hadOneofValue = true - if case .unwatch(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.payload = .unwatch(v) - } - }() - case 3: try { - var v: _ProtoSystemMessage_Terminated? - var hadOneofValue = false - if let current = self.payload { - hadOneofValue = true - if case .terminated(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.payload = .terminated(v) + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: + var v: _ProtoSystemMessage_Watch? + if let current = _storage._payload { + try decoder.handleConflictingOneOf() + if case .watch(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._payload = .watch(v)} + case 2: + var v: _ProtoSystemMessage_Unwatch? + if let current = _storage._payload { + try decoder.handleConflictingOneOf() + if case .unwatch(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._payload = .unwatch(v)} + case 3: + var v: _ProtoSystemMessage_Terminated? + if let current = _storage._payload { + try decoder.handleConflictingOneOf() + if case .terminated(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._payload = .terminated(v)} + default: break } - }() - default: break } } } public func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch self.payload { - case .watch?: try { - guard case .watch(let v)? = self.payload else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - }() - case .unwatch?: try { - guard case .unwatch(let v)? = self.payload else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - }() - case .terminated?: try { - guard case .terminated(let v)? 
= self.payload else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 3) - }() - case nil: break + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + switch _storage._payload { + case .watch(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + case .unwatch(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + case .terminated(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + case nil: break + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoSystemMessage, rhs: _ProtoSystemMessage) -> Bool { - if lhs.payload != rhs.payload {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._payload != rhs_storage._payload {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -333,32 +334,63 @@ extension _ProtoSystemMessage_Watch: SwiftProtobuf.Message, SwiftProtobuf._Messa 2: .same(proto: "watcher"), ] + fileprivate class _StorageClass { + var _watchee: _ProtoActorID? = nil + var _watcher: _ProtoActorID? = nil + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _watchee = source._watchee + _watcher = source._watcher + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._watchee) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._watcher) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._watchee) + case 2: try decoder.decodeSingularMessageField(value: &_storage._watcher) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._watchee { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if let v = self._watcher { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._watchee { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if let v = _storage._watcher { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoSystemMessage_Watch, rhs: _ProtoSystemMessage_Watch) -> Bool { - if lhs._watchee != rhs._watchee {return false} - if lhs._watcher != rhs._watcher {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._watchee != rhs_storage._watchee {return false} + if _storage._watcher != rhs_storage._watcher {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -371,32 +403,63 @@ extension _ProtoSystemMessage_Unwatch: SwiftProtobuf.Message, SwiftProtobuf._Mes 2: .same(proto: "watcher"), ] + fileprivate class _StorageClass { + var _watchee: _ProtoActorID? = nil + var _watcher: _ProtoActorID? = nil + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _watchee = source._watchee + _watcher = source._watcher + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._watchee) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._watcher) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._watchee) + case 2: try decoder.decodeSingularMessageField(value: &_storage._watcher) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._watchee { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if let v = self._watcher { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._watchee { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if let v = _storage._watcher { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoSystemMessage_Unwatch, rhs: _ProtoSystemMessage_Unwatch) -> Bool { - if lhs._watchee != rhs._watchee {return false} - if lhs._watcher != rhs._watcher {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._watchee != rhs_storage._watchee {return false} + if _storage._watcher != rhs_storage._watcher {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -410,37 +473,70 @@ extension _ProtoSystemMessage_Terminated: SwiftProtobuf.Message, SwiftProtobuf._ 3: .same(proto: "idTerminated"), ] + fileprivate class _StorageClass { + var _ref: _ProtoActorID? = nil + var _existenceConfirmed: Bool = false + var _idTerminated: Bool = false + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _ref = source._ref + _existenceConfirmed = source._existenceConfirmed + _idTerminated = source._idTerminated + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + public mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._ref) }() - case 2: try { try decoder.decodeSingularBoolField(value: &self.existenceConfirmed) }() - case 3: try { try decoder.decodeSingularBoolField(value: &self.idTerminated) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._ref) + case 2: try decoder.decodeSingularBoolField(value: &_storage._existenceConfirmed) + case 3: try decoder.decodeSingularBoolField(value: &_storage._idTerminated) + default: break + } } } } public func traverse(visitor: inout V) throws { - if let v = self._ref { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if self.existenceConfirmed != false { - try visitor.visitSingularBoolField(value: self.existenceConfirmed, fieldNumber: 2) - } - if self.idTerminated != false { - try visitor.visitSingularBoolField(value: self.idTerminated, fieldNumber: 3) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._ref { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if _storage._existenceConfirmed != false { + try visitor.visitSingularBoolField(value: _storage._existenceConfirmed, fieldNumber: 2) + } + if _storage._idTerminated != false { + try visitor.visitSingularBoolField(value: _storage._idTerminated, fieldNumber: 3) + } } try unknownFields.traverse(visitor: &visitor) } public static func ==(lhs: _ProtoSystemMessage_Terminated, rhs: _ProtoSystemMessage_Terminated) -> Bool { - if lhs._ref != rhs._ref {return false} - if lhs.existenceConfirmed != rhs.existenceConfirmed {return false} - if lhs.idTerminated != rhs.idTerminated {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._ref != rhs_storage._ref {return false} + if _storage._existenceConfirmed != rhs_storage._existenceConfirmed {return false} + if _storage._idTerminated != rhs_storage._idTerminated {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -454,11 +550,8 @@ extension _ProtoSystemMessageACK: SwiftProtobuf.Message, SwiftProtobuf._MessageI public mutating func decodeMessage(decoder: inout D) throws { while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeSingularUInt64Field(value: &self.sequenceNr) }() + case 1: try decoder.decodeSingularUInt64Field(value: &self.sequenceNr) default: break } } @@ -486,11 +579,8 @@ extension _ProtoSystemMessageNACK: SwiftProtobuf.Message, SwiftProtobuf._Message public mutating func decodeMessage(decoder: inout D) throws { while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeSingularUInt64Field(value: &self.sequenceNr) }() + case 1: try decoder.decodeSingularUInt64Field(value: &self.sequenceNr) default: break } } @@ -542,12 +632,9 @@ extension _ProtoSystemMessageEnvelope: SwiftProtobuf.Message, SwiftProtobuf._Mes _ = _uniqueStorage() try withExtendedLifetime(_storage) { (_storage: _StorageClass) in while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeSingularUInt64Field(value: &_storage._sequenceNr) }() - case 2: try { try decoder.decodeSingularMessageField(value: &_storage._message) }() + case 1: try decoder.decodeSingularUInt64Field(value: &_storage._sequenceNr) + case 2: try decoder.decodeSingularMessageField(value: &_storage._message) default: break } } diff --git a/Sources/DistributedCluster/Protobuf/WireProtocol+Serialization.swift b/Sources/DistributedCluster/Protobuf/WireProtocol+Serialization.swift index 339ff0144..304c27058 100644 --- a/Sources/DistributedCluster/Protobuf/WireProtocol+Serialization.swift +++ b/Sources/DistributedCluster/Protobuf/WireProtocol+Serialization.swift @@ -144,15 +144,15 @@ extension Wire.HandshakeOffer { guard proto.hasOriginNode else { throw SerializationError(.missingField("originNode", type: String(reflecting: Wire.HandshakeOffer.self))) } - guard proto.hasTargetNode else { - throw SerializationError(.missingField("targetNode", type: String(reflecting: Wire.HandshakeOffer.self))) + guard proto.hasTargetEndpoint else { + throw SerializationError(.missingField("targetEndpoint", type: String(reflecting: Wire.HandshakeOffer.self))) } guard proto.hasVersion else { throw SerializationError(.missingField("version", type: String(reflecting: Wire.HandshakeOffer.self))) } - self.originNode = try UniqueNode(proto.originNode) - self.targetNode = Node(proto.targetNode) + self.originNode = try Cluster.Node(proto.originNode) + self.targetEndpoint = Cluster.Endpoint(proto.targetEndpoint) self.version = Wire.Version(reserved: UInt8(proto.version.reserved), major: UInt8(proto.version.major), minor: UInt8(proto.version.minor), patch: UInt8(proto.version.patch)) } } @@ -160,8 +160,8 @@ extension Wire.HandshakeOffer { extension _ProtoHandshakeOffer { init(_ offer: Wire.HandshakeOffer) { self.version = _ProtoProtocolVersion(offer.version) - self.originNode = _ProtoUniqueNode(offer.originNode) - self.targetNode = _ProtoNode(offer.targetNode) + self.originNode = _ProtoClusterNode(offer.originNode) + self.targetEndpoint = _ProtoClusterEndpoint(offer.targetEndpoint) } init(serializedData data: Data) throws { @@ -174,8 +174,8 @@ extension _ProtoHandshakeOffer { guard proto.hasOriginNode else { throw SerializationError(.missingField("hasOriginNode", type: String(reflecting: Wire.HandshakeOffer.self))) } - guard proto.hasTargetNode else { - throw SerializationError(.missingField("targetNode", type: String(reflecting: Wire.HandshakeOffer.self))) + guard proto.hasTargetEndpoint else { + throw SerializationError(.missingField("targetEndpoint", type: String(reflecting: Wire.HandshakeOffer.self))) } self = proto diff --git a/Sources/DistributedCluster/Protobuf/WireProtocol.pb.swift b/Sources/DistributedCluster/Protobuf/WireProtocol.pb.swift index 55cfc2514..333ecaaef 100644 --- 
a/Sources/DistributedCluster/Protobuf/WireProtocol.pb.swift +++ b/Sources/DistributedCluster/Protobuf/WireProtocol.pb.swift @@ -1,5 +1,4 @@ // DO NOT EDIT. -// swift-format-ignore-file // // Generated by the Swift generator plugin for the protocol buffer compiler. // Source: WireProtocol.proto @@ -27,7 +26,7 @@ import SwiftProtobuf // If the compiler emits an error on this type, it is because this file // was generated by a version of the `protoc` Swift plug-in that is // incompatible with the version of SwiftProtobuf to which you are linking. -// Please ensure that you are building against the same version of the API +// Please ensure that your are building against the same version of the API // that was used to generate this file. fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} @@ -40,44 +39,42 @@ struct _ProtoHandshakeOffer { // methods supported on all messages. var version: _ProtoProtocolVersion { - get {return _version ?? _ProtoProtocolVersion()} - set {_version = newValue} + get {return _storage._version ?? _ProtoProtocolVersion()} + set {_uniqueStorage()._version = newValue} } /// Returns true if `version` has been explicitly set. - var hasVersion: Bool {return self._version != nil} + var hasVersion: Bool {return _storage._version != nil} /// Clears the value of `version`. Subsequent reads from it will return its default value. - mutating func clearVersion() {self._version = nil} + mutating func clearVersion() {_uniqueStorage()._version = nil} - var originNode: _ProtoUniqueNode { - get {return _originNode ?? _ProtoUniqueNode()} - set {_originNode = newValue} + var originNode: _ProtoClusterNode { + get {return _storage._originNode ?? _ProtoClusterNode()} + set {_uniqueStorage()._originNode = newValue} } /// Returns true if `originNode` has been explicitly set. - var hasOriginNode: Bool {return self._originNode != nil} + var hasOriginNode: Bool {return _storage._originNode != nil} /// Clears the value of `originNode`. Subsequent reads from it will return its default value. - mutating func clearOriginNode() {self._originNode = nil} + mutating func clearOriginNode() {_uniqueStorage()._originNode = nil} /// In the future we may want to add additional information /// about certain capabilities here. E.g. when a node supports /// faster transport like InfiniBand and the likes, so we can /// upgrade the connection in case both nodes support the fast /// transport. - var targetNode: _ProtoNode { - get {return _targetNode ?? _ProtoNode()} - set {_targetNode = newValue} + var targetEndpoint: _ProtoClusterEndpoint { + get {return _storage._targetEndpoint ?? _ProtoClusterEndpoint()} + set {_uniqueStorage()._targetEndpoint = newValue} } - /// Returns true if `targetNode` has been explicitly set. - var hasTargetNode: Bool {return self._targetNode != nil} - /// Clears the value of `targetNode`. Subsequent reads from it will return its default value. - mutating func clearTargetNode() {self._targetNode = nil} + /// Returns true if `targetEndpoint` has been explicitly set. + var hasTargetEndpoint: Bool {return _storage._targetEndpoint != nil} + /// Clears the value of `targetEndpoint`. Subsequent reads from it will return its default value. + mutating func clearTargetEndpoint() {_uniqueStorage()._targetEndpoint = nil} var unknownFields = SwiftProtobuf.UnknownStorage() init() {} - fileprivate var _version: _ProtoProtocolVersion? = nil - fileprivate var _originNode: _ProtoUniqueNode? 
= nil - fileprivate var _targetNode: _ProtoNode? = nil + fileprivate var _storage = _StorageClass.defaultInstance } struct _ProtoHandshakeResponse { @@ -85,22 +82,25 @@ struct _ProtoHandshakeResponse { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - var status: _ProtoHandshakeResponse.OneOf_Status? = nil + var status: OneOf_Status? { + get {return _storage._status} + set {_uniqueStorage()._status = newValue} + } var accept: _ProtoHandshakeAccept { get { - if case .accept(let v)? = status {return v} + if case .accept(let v)? = _storage._status {return v} return _ProtoHandshakeAccept() } - set {status = .accept(newValue)} + set {_uniqueStorage()._status = .accept(newValue)} } var reject: _ProtoHandshakeReject { get { - if case .reject(let v)? = status {return v} + if case .reject(let v)? = _storage._status {return v} return _ProtoHandshakeReject() } - set {status = .reject(newValue)} + set {_uniqueStorage()._status = .reject(newValue)} } var unknownFields = SwiftProtobuf.UnknownStorage() @@ -111,18 +111,9 @@ struct _ProtoHandshakeResponse { #if !swift(>=4.1) static func ==(lhs: _ProtoHandshakeResponse.OneOf_Status, rhs: _ProtoHandshakeResponse.OneOf_Status) -> Bool { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch (lhs, rhs) { - case (.accept, .accept): return { - guard case .accept(let l) = lhs, case .accept(let r) = rhs else { preconditionFailure() } - return l == r - }() - case (.reject, .reject): return { - guard case .reject(let l) = lhs, case .reject(let r) = rhs else { preconditionFailure() } - return l == r - }() + case (.accept(let l), .accept(let r)): return l == r + case (.reject(let l), .reject(let r)): return l == r default: return false } } @@ -130,6 +121,8 @@ struct _ProtoHandshakeResponse { } init() {} + + fileprivate var _storage = _StorageClass.defaultInstance } struct _ProtoHandshakeAccept { @@ -138,39 +131,37 @@ struct _ProtoHandshakeAccept { // methods supported on all messages. var version: _ProtoProtocolVersion { - get {return _version ?? _ProtoProtocolVersion()} - set {_version = newValue} + get {return _storage._version ?? _ProtoProtocolVersion()} + set {_uniqueStorage()._version = newValue} } /// Returns true if `version` has been explicitly set. - var hasVersion: Bool {return self._version != nil} + var hasVersion: Bool {return _storage._version != nil} /// Clears the value of `version`. Subsequent reads from it will return its default value. - mutating func clearVersion() {self._version = nil} + mutating func clearVersion() {_uniqueStorage()._version = nil} - var originNode: _ProtoUniqueNode { - get {return _originNode ?? _ProtoUniqueNode()} - set {_originNode = newValue} + var originNode: _ProtoClusterNode { + get {return _storage._originNode ?? _ProtoClusterNode()} + set {_uniqueStorage()._originNode = newValue} } /// Returns true if `originNode` has been explicitly set. - var hasOriginNode: Bool {return self._originNode != nil} + var hasOriginNode: Bool {return _storage._originNode != nil} /// Clears the value of `originNode`. Subsequent reads from it will return its default value. - mutating func clearOriginNode() {self._originNode = nil} + mutating func clearOriginNode() {_uniqueStorage()._originNode = nil} - var targetNode: _ProtoUniqueNode { - get {return _targetNode ?? 
_ProtoUniqueNode()} - set {_targetNode = newValue} + var targetNode: _ProtoClusterNode { + get {return _storage._targetNode ?? _ProtoClusterNode()} + set {_uniqueStorage()._targetNode = newValue} } /// Returns true if `targetNode` has been explicitly set. - var hasTargetNode: Bool {return self._targetNode != nil} + var hasTargetNode: Bool {return _storage._targetNode != nil} /// Clears the value of `targetNode`. Subsequent reads from it will return its default value. - mutating func clearTargetNode() {self._targetNode = nil} + mutating func clearTargetNode() {_uniqueStorage()._targetNode = nil} var unknownFields = SwiftProtobuf.UnknownStorage() init() {} - fileprivate var _version: _ProtoProtocolVersion? = nil - fileprivate var _originNode: _ProtoUniqueNode? = nil - fileprivate var _targetNode: _ProtoUniqueNode? = nil + fileprivate var _storage = _StorageClass.defaultInstance } struct _ProtoHandshakeReject { @@ -179,41 +170,42 @@ struct _ProtoHandshakeReject { // methods supported on all messages. var version: _ProtoProtocolVersion { - get {return _version ?? _ProtoProtocolVersion()} - set {_version = newValue} + get {return _storage._version ?? _ProtoProtocolVersion()} + set {_uniqueStorage()._version = newValue} } /// Returns true if `version` has been explicitly set. - var hasVersion: Bool {return self._version != nil} + var hasVersion: Bool {return _storage._version != nil} /// Clears the value of `version`. Subsequent reads from it will return its default value. - mutating func clearVersion() {self._version = nil} + mutating func clearVersion() {_uniqueStorage()._version = nil} - var originNode: _ProtoUniqueNode { - get {return _originNode ?? _ProtoUniqueNode()} - set {_originNode = newValue} + var originNode: _ProtoClusterNode { + get {return _storage._originNode ?? _ProtoClusterNode()} + set {_uniqueStorage()._originNode = newValue} } /// Returns true if `originNode` has been explicitly set. - var hasOriginNode: Bool {return self._originNode != nil} + var hasOriginNode: Bool {return _storage._originNode != nil} /// Clears the value of `originNode`. Subsequent reads from it will return its default value. - mutating func clearOriginNode() {self._originNode = nil} + mutating func clearOriginNode() {_uniqueStorage()._originNode = nil} - var targetNode: _ProtoUniqueNode { - get {return _targetNode ?? _ProtoUniqueNode()} - set {_targetNode = newValue} + var targetNode: _ProtoClusterNode { + get {return _storage._targetNode ?? _ProtoClusterNode()} + set {_uniqueStorage()._targetNode = newValue} } /// Returns true if `targetNode` has been explicitly set. - var hasTargetNode: Bool {return self._targetNode != nil} + var hasTargetNode: Bool {return _storage._targetNode != nil} /// Clears the value of `targetNode`. Subsequent reads from it will return its default value. - mutating func clearTargetNode() {self._targetNode = nil} + mutating func clearTargetNode() {_uniqueStorage()._targetNode = nil} - var reason: String = String() + var reason: String { + get {return _storage._reason} + set {_uniqueStorage()._reason = newValue} + } var unknownFields = SwiftProtobuf.UnknownStorage() init() {} - fileprivate var _version: _ProtoProtocolVersion? = nil - fileprivate var _originNode: _ProtoUniqueNode? = nil - fileprivate var _targetNode: _ProtoUniqueNode? = nil + fileprivate var _storage = _StorageClass.defaultInstance } struct _ProtoEnvelope { @@ -222,31 +214,33 @@ struct _ProtoEnvelope { // methods supported on all messages. var recipient: _ProtoActorID { - get {return _recipient ?? 
_ProtoActorID()} - set {_recipient = newValue} + get {return _storage._recipient ?? _ProtoActorID()} + set {_uniqueStorage()._recipient = newValue} } /// Returns true if `recipient` has been explicitly set. - var hasRecipient: Bool {return self._recipient != nil} + var hasRecipient: Bool {return _storage._recipient != nil} /// Clears the value of `recipient`. Subsequent reads from it will return its default value. - mutating func clearRecipient() {self._recipient = nil} + mutating func clearRecipient() {_uniqueStorage()._recipient = nil} var manifest: _ProtoManifest { - get {return _manifest ?? _ProtoManifest()} - set {_manifest = newValue} + get {return _storage._manifest ?? _ProtoManifest()} + set {_uniqueStorage()._manifest = newValue} } /// Returns true if `manifest` has been explicitly set. - var hasManifest: Bool {return self._manifest != nil} + var hasManifest: Bool {return _storage._manifest != nil} /// Clears the value of `manifest`. Subsequent reads from it will return its default value. - mutating func clearManifest() {self._manifest = nil} + mutating func clearManifest() {_uniqueStorage()._manifest = nil} - var payload: Data = Data() + var payload: Data { + get {return _storage._payload} + set {_uniqueStorage()._payload = newValue} + } var unknownFields = SwiftProtobuf.UnknownStorage() init() {} - fileprivate var _recipient: _ProtoActorID? = nil - fileprivate var _manifest: _ProtoManifest? = nil + fileprivate var _storage = _StorageClass.defaultInstance } /// System messages have to be reliable, therefore they need to be acknowledged @@ -256,34 +250,39 @@ struct _ProtoSystemEnvelope { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - var sequenceNr: UInt64 = 0 + var sequenceNr: UInt64 { + get {return _storage._sequenceNr} + set {_uniqueStorage()._sequenceNr = newValue} + } - var from: _ProtoUniqueNode { - get {return _from ?? _ProtoUniqueNode()} - set {_from = newValue} + var from: _ProtoClusterNode { + get {return _storage._from ?? _ProtoClusterNode()} + set {_uniqueStorage()._from = newValue} } /// Returns true if `from` has been explicitly set. - var hasFrom: Bool {return self._from != nil} + var hasFrom: Bool {return _storage._from != nil} /// Clears the value of `from`. Subsequent reads from it will return its default value. - mutating func clearFrom() {self._from = nil} + mutating func clearFrom() {_uniqueStorage()._from = nil} var manifest: _ProtoManifest { - get {return _manifest ?? _ProtoManifest()} - set {_manifest = newValue} + get {return _storage._manifest ?? _ProtoManifest()} + set {_uniqueStorage()._manifest = newValue} } /// Returns true if `manifest` has been explicitly set. - var hasManifest: Bool {return self._manifest != nil} + var hasManifest: Bool {return _storage._manifest != nil} /// Clears the value of `manifest`. Subsequent reads from it will return its default value. - mutating func clearManifest() {self._manifest = nil} + mutating func clearManifest() {_uniqueStorage()._manifest = nil} - var payload: Data = Data() + var payload: Data { + get {return _storage._payload} + set {_uniqueStorage()._payload = newValue} + } var unknownFields = SwiftProtobuf.UnknownStorage() init() {} - fileprivate var _from: _ProtoUniqueNode? = nil - fileprivate var _manifest: _ProtoManifest? 
= nil + fileprivate var _storage = _StorageClass.defaultInstance } struct _ProtoSystemAck { @@ -291,22 +290,25 @@ struct _ProtoSystemAck { // `Message` and `Message+*Additions` files in the SwiftProtobuf library for // methods supported on all messages. - var sequenceNr: UInt64 = 0 + var sequenceNr: UInt64 { + get {return _storage._sequenceNr} + set {_uniqueStorage()._sequenceNr = newValue} + } - var from: _ProtoUniqueNode { - get {return _from ?? _ProtoUniqueNode()} - set {_from = newValue} + var from: _ProtoClusterNode { + get {return _storage._from ?? _ProtoClusterNode()} + set {_uniqueStorage()._from = newValue} } /// Returns true if `from` has been explicitly set. - var hasFrom: Bool {return self._from != nil} + var hasFrom: Bool {return _storage._from != nil} /// Clears the value of `from`. Subsequent reads from it will return its default value. - mutating func clearFrom() {self._from = nil} + mutating func clearFrom() {_uniqueStorage()._from = nil} var unknownFields = SwiftProtobuf.UnknownStorage() init() {} - fileprivate var _from: _ProtoUniqueNode? = nil + fileprivate var _storage = _StorageClass.defaultInstance } /// The version is represented as 4 bytes: @@ -343,40 +345,73 @@ extension _ProtoHandshakeOffer: SwiftProtobuf.Message, SwiftProtobuf._MessageImp static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 1: .same(proto: "version"), 2: .same(proto: "originNode"), - 3: .same(proto: "targetNode"), + 3: .same(proto: "targetEndpoint"), ] + fileprivate class _StorageClass { + var _version: _ProtoProtocolVersion? = nil + var _originNode: _ProtoClusterNode? = nil + var _targetEndpoint: _ProtoClusterEndpoint? = nil + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _version = source._version + _originNode = source._originNode + _targetEndpoint = source._targetEndpoint + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._version) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._originNode) }() - case 3: try { try decoder.decodeSingularMessageField(value: &self._targetNode) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._version) + case 2: try decoder.decodeSingularMessageField(value: &_storage._originNode) + case 3: try decoder.decodeSingularMessageField(value: &_storage._targetEndpoint) + default: break + } } } } func traverse(visitor: inout V) throws { - if let v = self._version { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if let v = self._originNode { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - } - if let v = self._targetNode { - try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._version { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if let v = _storage._originNode { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } + if let v = _storage._targetEndpoint { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } } try unknownFields.traverse(visitor: &visitor) } static func ==(lhs: _ProtoHandshakeOffer, rhs: _ProtoHandshakeOffer) -> Bool { - if lhs._version != rhs._version {return false} - if lhs._originNode != rhs._originNode {return false} - if lhs._targetNode != rhs._targetNode {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._version != rhs_storage._version {return false} + if _storage._originNode != rhs_storage._originNode {return false} + if _storage._targetEndpoint != rhs_storage._targetEndpoint {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -389,63 +424,75 @@ extension _ProtoHandshakeResponse: SwiftProtobuf.Message, SwiftProtobuf._Message 2: .same(proto: "reject"), ] + fileprivate class _StorageClass { + var _status: _ProtoHandshakeResponse.OneOf_Status? + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _status = source._status + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { - var v: _ProtoHandshakeAccept? 
- var hadOneofValue = false - if let current = self.status { - hadOneofValue = true - if case .accept(let m) = current {v = m} - } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.status = .accept(v) - } - }() - case 2: try { - var v: _ProtoHandshakeReject? - var hadOneofValue = false - if let current = self.status { - hadOneofValue = true - if case .reject(let m) = current {v = m} + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: + var v: _ProtoHandshakeAccept? + if let current = _storage._status { + try decoder.handleConflictingOneOf() + if case .accept(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._status = .accept(v)} + case 2: + var v: _ProtoHandshakeReject? + if let current = _storage._status { + try decoder.handleConflictingOneOf() + if case .reject(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v {_storage._status = .reject(v)} + default: break } - try decoder.decodeSingularMessageField(value: &v) - if let v = v { - if hadOneofValue {try decoder.handleConflictingOneOf()} - self.status = .reject(v) - } - }() - default: break } } } func traverse(visitor: inout V) throws { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch self.status { - case .accept?: try { - guard case .accept(let v)? = self.status else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - }() - case .reject?: try { - guard case .reject(let v)? = self.status else { preconditionFailure() } - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - }() - case nil: break + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + switch _storage._status { + case .accept(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + case .reject(let v)?: + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + case nil: break + } } try unknownFields.traverse(visitor: &visitor) } static func ==(lhs: _ProtoHandshakeResponse, rhs: _ProtoHandshakeResponse) -> Bool { - if lhs.status != rhs.status {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._status != rhs_storage._status {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -459,37 +506,70 @@ extension _ProtoHandshakeAccept: SwiftProtobuf.Message, SwiftProtobuf._MessageIm 3: .same(proto: "targetNode"), ] + fileprivate class _StorageClass { + var _version: _ProtoProtocolVersion? = nil + var _originNode: _ProtoClusterNode? = nil + var _targetNode: _ProtoClusterNode? 
= nil + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _version = source._version + _originNode = source._originNode + _targetNode = source._targetNode + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._version) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._originNode) }() - case 3: try { try decoder.decodeSingularMessageField(value: &self._targetNode) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._version) + case 2: try decoder.decodeSingularMessageField(value: &_storage._originNode) + case 3: try decoder.decodeSingularMessageField(value: &_storage._targetNode) + default: break + } } } } func traverse(visitor: inout V) throws { - if let v = self._version { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if let v = self._originNode { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - } - if let v = self._targetNode { - try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._version { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if let v = _storage._originNode { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } + if let v = _storage._targetNode { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } } try unknownFields.traverse(visitor: &visitor) } static func ==(lhs: _ProtoHandshakeAccept, rhs: _ProtoHandshakeAccept) -> Bool { - if lhs._version != rhs._version {return false} - if lhs._originNode != rhs._originNode {return false} - if lhs._targetNode != rhs._targetNode {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._version != rhs_storage._version {return false} + if _storage._originNode != rhs_storage._originNode {return false} + if _storage._targetNode != rhs_storage._targetNode {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -504,42 +584,77 @@ extension _ProtoHandshakeReject: SwiftProtobuf.Message, SwiftProtobuf._MessageIm 4: .same(proto: "reason"), ] + fileprivate class _StorageClass { + var _version: _ProtoProtocolVersion? = nil + var _originNode: _ProtoClusterNode? = nil + var _targetNode: _ProtoClusterNode? 
= nil + var _reason: String = String() + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _version = source._version + _originNode = source._originNode + _targetNode = source._targetNode + _reason = source._reason + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._version) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._originNode) }() - case 3: try { try decoder.decodeSingularMessageField(value: &self._targetNode) }() - case 4: try { try decoder.decodeSingularStringField(value: &self.reason) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._version) + case 2: try decoder.decodeSingularMessageField(value: &_storage._originNode) + case 3: try decoder.decodeSingularMessageField(value: &_storage._targetNode) + case 4: try decoder.decodeSingularStringField(value: &_storage._reason) + default: break + } } } } func traverse(visitor: inout V) throws { - if let v = self._version { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if let v = self._originNode { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - } - if let v = self._targetNode { - try visitor.visitSingularMessageField(value: v, fieldNumber: 3) - } - if !self.reason.isEmpty { - try visitor.visitSingularStringField(value: self.reason, fieldNumber: 4) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._version { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if let v = _storage._originNode { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } + if let v = _storage._targetNode { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } + if !_storage._reason.isEmpty { + try visitor.visitSingularStringField(value: _storage._reason, fieldNumber: 4) + } } try unknownFields.traverse(visitor: &visitor) } static func ==(lhs: _ProtoHandshakeReject, rhs: _ProtoHandshakeReject) -> Bool { - if lhs._version != rhs._version {return false} - if lhs._originNode != rhs._originNode {return false} - if lhs._targetNode != rhs._targetNode {return false} - if lhs.reason != rhs.reason {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._version != rhs_storage._version {return false} + if _storage._originNode != rhs_storage._originNode {return false} + if _storage._targetNode != rhs_storage._targetNode {return false} + if _storage._reason != rhs_storage._reason {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} 
return true } @@ -553,37 +668,70 @@ extension _ProtoEnvelope: SwiftProtobuf.Message, SwiftProtobuf._MessageImplement 3: .same(proto: "payload"), ] + fileprivate class _StorageClass { + var _recipient: _ProtoActorID? = nil + var _manifest: _ProtoManifest? = nil + var _payload: Data = SwiftProtobuf.Internal.emptyData + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _recipient = source._recipient + _manifest = source._manifest + _payload = source._payload + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularMessageField(value: &self._recipient) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._manifest) }() - case 3: try { try decoder.decodeSingularBytesField(value: &self.payload) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularMessageField(value: &_storage._recipient) + case 2: try decoder.decodeSingularMessageField(value: &_storage._manifest) + case 3: try decoder.decodeSingularBytesField(value: &_storage._payload) + default: break + } } } } func traverse(visitor: inout V) throws { - if let v = self._recipient { - try visitor.visitSingularMessageField(value: v, fieldNumber: 1) - } - if let v = self._manifest { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - } - if !self.payload.isEmpty { - try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 3) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if let v = _storage._recipient { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } + if let v = _storage._manifest { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } + if !_storage._payload.isEmpty { + try visitor.visitSingularBytesField(value: _storage._payload, fieldNumber: 3) + } } try unknownFields.traverse(visitor: &visitor) } static func ==(lhs: _ProtoEnvelope, rhs: _ProtoEnvelope) -> Bool { - if lhs._recipient != rhs._recipient {return false} - if lhs._manifest != rhs._manifest {return false} - if lhs.payload != rhs.payload {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._recipient != rhs_storage._recipient {return false} + if _storage._manifest != rhs_storage._manifest {return false} + if _storage._payload != rhs_storage._payload {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -598,42 +746,77 @@ extension _ProtoSystemEnvelope: SwiftProtobuf.Message, SwiftProtobuf._MessageImp 4: .same(proto: "payload"), ] + fileprivate class _StorageClass { + var _sequenceNr: UInt64 = 0 + var _from: _ProtoClusterNode? 
= nil + var _manifest: _ProtoManifest? = nil + var _payload: Data = SwiftProtobuf.Internal.emptyData + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _sequenceNr = source._sequenceNr + _from = source._from + _manifest = source._manifest + _payload = source._payload + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularUInt64Field(value: &self.sequenceNr) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._from) }() - case 3: try { try decoder.decodeSingularMessageField(value: &self._manifest) }() - case 4: try { try decoder.decodeSingularBytesField(value: &self.payload) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularUInt64Field(value: &_storage._sequenceNr) + case 2: try decoder.decodeSingularMessageField(value: &_storage._from) + case 3: try decoder.decodeSingularMessageField(value: &_storage._manifest) + case 4: try decoder.decodeSingularBytesField(value: &_storage._payload) + default: break + } } } } func traverse(visitor: inout V) throws { - if self.sequenceNr != 0 { - try visitor.visitSingularUInt64Field(value: self.sequenceNr, fieldNumber: 1) - } - if let v = self._from { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) - } - if let v = self._manifest { - try visitor.visitSingularMessageField(value: v, fieldNumber: 3) - } - if !self.payload.isEmpty { - try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 4) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if _storage._sequenceNr != 0 { + try visitor.visitSingularUInt64Field(value: _storage._sequenceNr, fieldNumber: 1) + } + if let v = _storage._from { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } + if let v = _storage._manifest { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } + if !_storage._payload.isEmpty { + try visitor.visitSingularBytesField(value: _storage._payload, fieldNumber: 4) + } } try unknownFields.traverse(visitor: &visitor) } static func ==(lhs: _ProtoSystemEnvelope, rhs: _ProtoSystemEnvelope) -> Bool { - if lhs.sequenceNr != rhs.sequenceNr {return false} - if lhs._from != rhs._from {return false} - if lhs._manifest != rhs._manifest {return false} - if lhs.payload != rhs.payload {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._sequenceNr != rhs_storage._sequenceNr {return false} + if _storage._from != rhs_storage._from {return false} + if _storage._manifest != rhs_storage._manifest {return false} + if _storage._payload != rhs_storage._payload {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields 
!= rhs.unknownFields {return false} return true } @@ -646,32 +829,63 @@ extension _ProtoSystemAck: SwiftProtobuf.Message, SwiftProtobuf._MessageImplemen 2: .same(proto: "from"), ] + fileprivate class _StorageClass { + var _sequenceNr: UInt64 = 0 + var _from: _ProtoClusterNode? = nil + + static let defaultInstance = _StorageClass() + + private init() {} + + init(copying source: _StorageClass) { + _sequenceNr = source._sequenceNr + _from = source._from + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + mutating func decodeMessage(decoder: inout D) throws { - while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 - switch fieldNumber { - case 1: try { try decoder.decodeSingularUInt64Field(value: &self.sequenceNr) }() - case 2: try { try decoder.decodeSingularMessageField(value: &self._from) }() - default: break + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + switch fieldNumber { + case 1: try decoder.decodeSingularUInt64Field(value: &_storage._sequenceNr) + case 2: try decoder.decodeSingularMessageField(value: &_storage._from) + default: break + } } } } func traverse(visitor: inout V) throws { - if self.sequenceNr != 0 { - try visitor.visitSingularUInt64Field(value: self.sequenceNr, fieldNumber: 1) - } - if let v = self._from { - try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + if _storage._sequenceNr != 0 { + try visitor.visitSingularUInt64Field(value: _storage._sequenceNr, fieldNumber: 1) + } + if let v = _storage._from { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } } try unknownFields.traverse(visitor: &visitor) } static func ==(lhs: _ProtoSystemAck, rhs: _ProtoSystemAck) -> Bool { - if lhs.sequenceNr != rhs.sequenceNr {return false} - if lhs._from != rhs._from {return false} + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._sequenceNr != rhs_storage._sequenceNr {return false} + if _storage._from != rhs_storage._from {return false} + return true + } + if !storagesAreEqual {return false} + } if lhs.unknownFields != rhs.unknownFields {return false} return true } @@ -688,14 +902,11 @@ extension _ProtoProtocolVersion: SwiftProtobuf.Message, SwiftProtobuf._MessageIm mutating func decodeMessage(decoder: inout D) throws { while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeSingularUInt32Field(value: &self.reserved) }() - case 2: try { try decoder.decodeSingularUInt32Field(value: &self.major) }() - case 3: try { try decoder.decodeSingularUInt32Field(value: &self.minor) }() - case 4: try { try decoder.decodeSingularUInt32Field(value: &self.patch) }() + case 1: try decoder.decodeSingularUInt32Field(value: &self.reserved) + case 2: try decoder.decodeSingularUInt32Field(value: &self.major) + case 3: try decoder.decodeSingularUInt32Field(value: &self.minor) + case 4: try decoder.decodeSingularUInt32Field(value: &self.patch) default: break } } diff --git a/Sources/DistributedCluster/Receptionist/DistributedReceptionist.swift b/Sources/DistributedCluster/Receptionist/DistributedReceptionist.swift index 47a46174f..486aa0470 100644 --- a/Sources/DistributedCluster/Receptionist/DistributedReceptionist.swift +++ b/Sources/DistributedCluster/Receptionist/DistributedReceptionist.swift @@ -152,7 +152,7 @@ extension DistributedReception { } /// Represents a registration of an actor with its (local) receptionist, -/// at the version represented by the `(seqNr, .uniqueNode(actor.
.uniqueNode))`. +/// at the version represented by the `(seqNr, .node(actor.
.node))`. /// /// This allows a local subscriber to definitely compare a registration with its "already seen" /// version vector (that contains versions for every node it is receiving updates from), @@ -162,7 +162,7 @@ internal struct VersionedRegistration: Hashable { let actorID: ClusterSystem.ActorID init(remoteOpSeqNr: UInt64, actor: AnyDistributedActor) { - self.version = VersionVector(remoteOpSeqNr, at: .uniqueNode(actor.id.uniqueNode)) + self.version = VersionVector(remoteOpSeqNr, at: .node(actor.id.node)) self.actorID = actor.id } @@ -192,7 +192,7 @@ internal final class DistributedReceptionistStorage { /// Per (receptionist) node mapping of which keys are presently known to this receptionist on the given node. /// This is used to perform quicker cleanups upon a node/receptionist crashing, and thus all existing references /// on that node should be removed from our storage. - private var _registeredKeysByNode: [UniqueNode: Set] = [:] + private var _registeredKeysByNode: [Cluster.Node: Set] = [:] /// Allows for reverse lookups, when an actor terminates, we know from which registrations to remove it from. internal var _identityToRegisteredKeys: [ClusterSystem.ActorID: Set] = [:] @@ -215,7 +215,7 @@ internal final class DistributedReceptionistStorage { } self.addGuestKeyMapping(identity: guest.id, key: key) - self.storeRegistrationNodeRelation(key: key, node: guest.id.uniqueNode) + self.storeRegistrationNodeRelation(key: key, node: guest.id.node) let versionedRegistration = VersionedRegistration( remoteOpSeqNr: sequenced.sequenceRange.max, @@ -230,7 +230,7 @@ internal final class DistributedReceptionistStorage { let address = guest.id _ = self.removeFromKeyMappings(guest.id) - self.removeSingleRegistrationNodeRelation(key: key, node: address.uniqueNode) + self.removeSingleRegistrationNodeRelation(key: key, node: address.node) let versionedRegistration = VersionedRegistration( forRemovalOf: guest.id @@ -242,13 +242,13 @@ internal final class DistributedReceptionistStorage { self._registrations[key] } - private func storeRegistrationNodeRelation(key: AnyDistributedReceptionKey, node: UniqueNode?) { + private func storeRegistrationNodeRelation(key: AnyDistributedReceptionKey, node: Cluster.Node?) { if let node = node { self._registeredKeysByNode[node, default: []].insert(key) } } - private func removeSingleRegistrationNodeRelation(key: AnyDistributedReceptionKey, node: UniqueNode?) { + private func removeSingleRegistrationNodeRelation(key: AnyDistributedReceptionKey, node: Cluster.Node?) { // FIXME: Implement me (!), we need to make the storage a counter // and decrement here by one; once the counter reaches zero we know there is no more relationship // and we can prune this key/node relationship @@ -298,7 +298,7 @@ internal final class DistributedReceptionistStorage { /// (as they only were interested on things on the now-removed node). This allows us to eagerly and "in batch" give them a listing update /// *once* with all the remote actors removed, rather than trickling in the changes to the Listing one by one (as it would be the case /// if we waited for Terminated signals to trickle in and handle these removals one by one then). 
- func pruneNode(_ node: UniqueNode) -> PrunedNodeDirective { + func pruneNode(_ node: Cluster.Node) -> PrunedNodeDirective { let prune = PrunedNodeDirective() guard let keys = self._registeredKeysByNode[node] else { @@ -311,7 +311,7 @@ internal final class DistributedReceptionistStorage { // 1) we remove any registrations that it hosted let registrations = self._registrations.removeValue(forKey: key) ?? [] let remainingRegistrations = registrations._filter { registration in // FIXME(collections): missing type preserving filter on OrderedSet https://github.com/apple/swift-collections/pull/159 - registration.actorID.uniqueNode != node + registration.actorID.node != node } if !remainingRegistrations.isEmpty { self._registrations[key] = remainingRegistrations diff --git a/Sources/DistributedCluster/Receptionist/Receptionist.swift b/Sources/DistributedCluster/Receptionist/Receptionist.swift index b591dcb21..301e78a11 100644 --- a/Sources/DistributedCluster/Receptionist/Receptionist.swift +++ b/Sources/DistributedCluster/Receptionist/Receptionist.swift @@ -139,7 +139,7 @@ public struct Receptionist { /// Per (receptionist) node mapping of which keys are presently known to this receptionist on the given node. /// This is used to perform quicker cleanups upon a node/receptionist crashing, and thus all existing references /// on that node should be removed from our storage. - private var _registeredKeysByNode: [UniqueNode: Set] = [:] + private var _registeredKeysByNode: [Cluster.Node: Set] = [:] /// Allows for reverse lookups, when an actor terminates, we know from which registrations and subscriptions to remove it from. internal var _idToKeys: [ActorID: Set] = [:] @@ -150,13 +150,13 @@ public struct Receptionist { /// - returns: `true` if the value was a newly inserted value, `false` otherwise func addRegistration(key: AnyReceptionKey, ref: _AddressableActorRef) -> Bool { self.addRefKeyMapping(id: ref.id, key: key) - self.storeRegistrationNodeRelation(key: key, node: ref.id.uniqueNode) + self.storeRegistrationNodeRelation(key: key, node: ref.id.node) return self.addTo(dict: &self._registrations, key: key, value: ref) } func removeRegistration(key: AnyReceptionKey, ref: _AddressableActorRef) -> Set<_AddressableActorRef>? { _ = self.removeFromKeyMappings(ref) - self.removeSingleRegistrationNodeRelation(key: key, node: ref.id.uniqueNode) + self.removeSingleRegistrationNodeRelation(key: key, node: ref.id.node) return self.removeFrom(dict: &self._registrations, key: key, value: ref) } @@ -164,13 +164,13 @@ public struct Receptionist { self._registrations[key] } - private func storeRegistrationNodeRelation(key: AnyReceptionKey, node: UniqueNode?) { + private func storeRegistrationNodeRelation(key: AnyReceptionKey, node: Cluster.Node?) { if let node = node { self._registeredKeysByNode[node, default: []].insert(key) } } - private func removeSingleRegistrationNodeRelation(key: AnyReceptionKey, node: UniqueNode?) { + private func removeSingleRegistrationNodeRelation(key: AnyReceptionKey, node: Cluster.Node?) { // FIXME: Implement me (!), we need to make the storage a counter // and decrement here by one; once the counter reaches zero we know there is no more relationship // and we can prune this key/node relationship @@ -233,7 +233,7 @@ public struct Receptionist { /// (as they only were interested on things on the now-removed node). 
This allows us to eagerly and "in batch" give them a listing update /// *once* with all the remote actors removed, rather than trickling in the changes to the Listing one by one (as it would be the case /// if we waited for Terminated signals to trickle in and handle these removals one by one then). - func pruneNode(_ node: UniqueNode) -> PrunedNodeDirective { + func pruneNode(_ node: Cluster.Node) -> PrunedNodeDirective { var prune = PrunedNodeDirective() guard let keys = self._registeredKeysByNode[node] else { @@ -245,14 +245,14 @@ public struct Receptionist { for key in keys { // 1) we remove any registrations that it hosted let registrations: Set<_AddressableActorRef> = self._registrations.removeValue(forKey: key) ?? [] - let remainingRegistrations = registrations.filter { $0.id.uniqueNode != node } + let remainingRegistrations = registrations.filter { $0.id.node != node } if !remainingRegistrations.isEmpty { self._registrations[key] = remainingRegistrations } // 2) and remove any of our subscriptions let subs: Set = self._subscriptions.removeValue(forKey: key) ?? [] - let prunedSubs = subs.filter { $0.id.uniqueNode != node } + let prunedSubs = subs.filter { $0.id.node != node } if remainingRegistrations.count != registrations.count { // only if the set of registered actors for this key was actually affected by this prune // we want to mark it as changed and ensure we contact all of such keys subscribers about the change. @@ -319,7 +319,7 @@ extension ActorID { case distributedActors } - static func _receptionist(on node: UniqueNode, for type: ReceptionistType) -> ActorID { + static func _receptionist(on node: Cluster.Node, for type: ReceptionistType) -> ActorID { switch type { case .actorRefs: return ActorPath.actorRefReceptionist.makeRemoteID(on: node, incarnation: .wellKnown) diff --git a/Sources/DistributedCluster/Refs+any.swift b/Sources/DistributedCluster/Refs+any.swift index d12f0540b..bc552cdc1 100644 --- a/Sources/DistributedCluster/Refs+any.swift +++ b/Sources/DistributedCluster/Refs+any.swift @@ -129,7 +129,7 @@ extension _RemoteClusterActorPersonality { @usableFromInline internal func _tellUnsafe(_ message: Any, file: String = #filePath, line: UInt = #line) { guard let _message = message as? Message else { - traceLog_Remote(self.system.cluster.uniqueNode, "\(self.id)._tellUnsafe [\(message)] failed because of invalid type; self: \(self); Sent at \(file):\(line)") + traceLog_Remote(self.system.cluster.node, "\(self.id)._tellUnsafe [\(message)] failed because of invalid type; self: \(self); Sent at \(file):\(line)") return // TODO: drop the message } diff --git a/Sources/DistributedCluster/Refs.swift b/Sources/DistributedCluster/Refs.swift index 2ee84397d..812b1f37c 100644 --- a/Sources/DistributedCluster/Refs.swift +++ b/Sources/DistributedCluster/Refs.swift @@ -474,7 +474,7 @@ internal struct TheOneWhoHasNoParent: _ReceivesSystemMessages { // FIXME: fix th @usableFromInline let id: ActorID - init(local node: UniqueNode) { + init(local node: Cluster.Node) { self.id = ActorID._localRoot(on: node) } @@ -557,7 +557,7 @@ public class _Guardian { private var stopping: Bool = false weak var system: ClusterSystem? 
- init(parent: _ReceivesSystemMessages, name: String, localNode: UniqueNode, system: ClusterSystem) { + init(parent: _ReceivesSystemMessages, name: String, localNode: Cluster.Node, system: ClusterSystem) { assert(parent.id == ActorID._localRoot(on: localNode), "A Guardian MUST live directly under the `/` path.") do { diff --git a/Sources/DistributedCluster/Serialization/ActorRef+Serialization.swift b/Sources/DistributedCluster/Serialization/ActorRef+Serialization.swift index 2801fe5af..e7f3edf65 100644 --- a/Sources/DistributedCluster/Serialization/ActorRef+Serialization.swift +++ b/Sources/DistributedCluster/Serialization/ActorRef+Serialization.swift @@ -212,7 +212,7 @@ extension ActorIncarnation: Codable { // ==== ---------------------------------------------------------------------------------------------------------------- // MARK: Codable Node Address -extension Node: Codable { +extension Cluster.Endpoint: Codable { // FIXME: encode as authority/URI with optimized parser here, this will be executed many many times... public func encode(to encoder: Encoder) throws { var container = encoder.unkeyedContainer() @@ -234,17 +234,17 @@ extension Node: Codable { } } -extension UniqueNode: Codable { +extension Cluster.Node: Codable { // FIXME: encode as authority/URI with optimized parser here, this will be executed many many times... public func encode(to encoder: Encoder) throws { var container = encoder.unkeyedContainer() - try container.encode(self.node.protocol) + try container.encode(self.endpoint.protocol) // :// - try container.encode(self.node.systemName) + try container.encode(self.endpoint.systemName) // @ - try container.encode(self.node.host) + try container.encode(self.endpoint.host) // : - try container.encode(self.node.port) + try container.encode(self.endpoint.port) // # try container.encode(self.nid.value) } @@ -255,8 +255,8 @@ extension UniqueNode: Codable { let systemName = try container.decode(String.self) let host = try container.decode(String.self) let port = try container.decode(Int.self) - self.node = Node(protocol: `protocol`, systemName: systemName, host: host, port: port) - self.nid = try UniqueNodeID(container.decode(UInt64.self)) + self.endpoint = Cluster.Endpoint(protocol: `protocol`, systemName: systemName, host: host, port: port) + self.nid = try Cluster.Node.ID(container.decode(UInt64.self)) } } diff --git a/Sources/DistributedCluster/Serialization/Protobuf/Serialization.pb.swift b/Sources/DistributedCluster/Serialization/Protobuf/Serialization.pb.swift index 2a53bf9ba..9c02fece6 100644 --- a/Sources/DistributedCluster/Serialization/Protobuf/Serialization.pb.swift +++ b/Sources/DistributedCluster/Serialization/Protobuf/Serialization.pb.swift @@ -1,5 +1,4 @@ // DO NOT EDIT. -// swift-format-ignore-file // // Generated by the Swift generator plugin for the protocol buffer compiler. // Source: Serialization/Serialization.proto @@ -27,7 +26,7 @@ import SwiftProtobuf // If the compiler emits an error on this type, it is because this file // was generated by a version of the `protoc` Swift plug-in that is // incompatible with the version of SwiftProtobuf to which you are linking. -// Please ensure that you are building against the same version of the API +// Please ensure that your are building against the same version of the API // that was used to generate this file. 
fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} @@ -59,12 +58,9 @@ extension _ProtoManifest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplement public mutating func decodeMessage(decoder: inout D) throws { while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeSingularUInt32Field(value: &self.serializerID) }() - case 2: try { try decoder.decodeSingularStringField(value: &self.hint) }() + case 1: try decoder.decodeSingularUInt32Field(value: &self.serializerID) + case 2: try decoder.decodeSingularStringField(value: &self.hint) default: break } } diff --git a/Sources/DistributedCluster/Serialization/Serialization+Context.swift b/Sources/DistributedCluster/Serialization/Serialization+Context.swift index 072ce54e5..3a2832522 100644 --- a/Sources/DistributedCluster/Serialization/Serialization+Context.swift +++ b/Sources/DistributedCluster/Serialization/Serialization+Context.swift @@ -37,8 +37,8 @@ extension Serialization { public let allocator: NIO.ByteBufferAllocator /// Address to be included in serialized actor refs if they are local references. - public var localNode: UniqueNode { - self.system.cluster.uniqueNode + public var localNode: Cluster.Node { + self.system.cluster.node } internal init(log: Logger, system: ClusterSystem, allocator: NIO.ByteBufferAllocator) { diff --git a/Sources/DistributedCluster/Serialization/Serialization+Settings.swift b/Sources/DistributedCluster/Serialization/Serialization+Settings.swift index 1e644eae0..396ad07de 100644 --- a/Sources/DistributedCluster/Serialization/Serialization+Settings.swift +++ b/Sources/DistributedCluster/Serialization/Serialization+Settings.swift @@ -52,15 +52,15 @@ extension Serialization { /// - Note: Affects only _outbound_ messages which are `Codable`. public var defaultSerializerID: Serialization.SerializerID = .foundationJSON - /// `UniqueNode` to be included in actor addresses when serializing them. + /// `Cluster.Node` to be included in actor addresses when serializing them. /// By default this should be equal to the exposed node of the actor system. /// /// If clustering is not configured on this node, this value SHOULD be `nil`, /// as it is not useful to render any address for actors which shall never be reached remotely. /// /// This is set automatically when modifying the systems cluster settings. - internal var localNode: UniqueNode = - .init(systemName: "", host: "127.0.0.1", port: 7337, nid: UniqueNodeID(0)) + internal var localNode: Cluster.Node = + .init(systemName: "", host: "127.0.0.1", port: 7337, nid: Cluster.Node.ID(0)) /// Applied before automatically selecting a serializer based on manifest. /// Allows to deserialize incoming messages when "the same" message is now represented on this system differently. 
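The renamed types used throughout the hunks above layer cleanly: a `Cluster.Endpoint` (formerly `Node`) carries only the protocol/system/host/port coordinates, while a `Cluster.Node` (formerly `UniqueNode`) pairs an endpoint with a unique node ID, so a restarted system bound to the same endpoint stays distinguishable. A minimal sketch, assuming only the initializers and properties that appear in this patch; the concrete values and variable names are illustrative:

import DistributedCluster

// Illustrative endpoint: just the coordinates a node binds to or can be contacted at.
let endpoint = Cluster.Endpoint(systemName: "ExampleSystem", host: "127.0.0.1", port: 7337)

// Illustrative node: the same coordinates plus a unique node ID (`nid`).
let node = Cluster.Node(systemName: "ExampleSystem", host: "127.0.0.1", port: 7337, nid: Cluster.Node.ID(11111))

// A node exposes its underlying endpoint; the nid is what tells two incarnations on the same endpoint apart.
assert(node.endpoint.host == endpoint.host && node.endpoint.port == endpoint.port)

// Joining is expressed against an endpoint, while membership reports concrete Cluster.Node values,
// e.g. `system.cluster.node` for the local system (assuming a running `system: ClusterSystem`):
// system.cluster.join(endpoint: endpoint)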
diff --git a/Sources/DistributedCluster/Serialization/Serialization.swift b/Sources/DistributedCluster/Serialization/Serialization.swift index bb7f6d33c..9c5ba7204 100644 --- a/Sources/DistributedCluster/Serialization/Serialization.swift +++ b/Sources/DistributedCluster/Serialization/Serialization.swift @@ -151,7 +151,7 @@ public class Serialization { var log = system.log // TODO: Dry up setting this metadata - log[metadataKey: "node"] = .stringConvertible(systemSettings.uniqueBindNode) + log[metadataKey: "node"] = .stringConvertible(systemSettings.bindNode) log[metadataKey: "actor/path"] = "/system/serialization" // TODO: this is a fake path, we could use log source: here if it gets merged log.logLevel = systemSettings.logging.logLevel self.log = log diff --git a/Sources/DistributedCluster/SystemMessages.swift b/Sources/DistributedCluster/SystemMessages.swift index c1dd17ae5..2c4ca1790 100644 --- a/Sources/DistributedCluster/SystemMessages.swift +++ b/Sources/DistributedCluster/SystemMessages.swift @@ -69,7 +69,7 @@ internal enum _SystemMessage: Equatable { /// Node has terminated, and all actors of this node shall be considered as terminated. /// This system message does _not_ have a direct counter part as `Signal`, and instead results in the sending of multiple /// `Signals.Terminated` messages, for every watched actor which was residing on the (now terminated) node. - case nodeTerminated(UniqueNode) // TODO: more additional info? + case nodeTerminated(Cluster.Node) // TODO: more additional info? /// Sent by parent to child actor to stop it case stop diff --git a/Sources/DistributedCluster/_ActorShell.swift b/Sources/DistributedCluster/_ActorShell.swift index e0cadd819..42c3083e6 100644 --- a/Sources/DistributedCluster/_ActorShell.swift +++ b/Sources/DistributedCluster/_ActorShell.swift @@ -834,7 +834,7 @@ extension _ActorShell { /// This action is performed concurrently by all actors who have watched remote actors on given node, /// and no ordering guarantees are made about which actors will get the Terminated signals first. 
@inlinable - func interpretNodeTerminated(_ terminatedNode: UniqueNode) { + func interpretNodeTerminated(_ terminatedNode: Cluster.Node) { #if SACT_TRACE_ACTOR_SHELL self.log.info("Received address terminated: \(terminatedNode)") #endif diff --git a/Sources/DistributedCluster/utils.swift b/Sources/DistributedCluster/utils.swift index 27221aec3..7b34c6711 100644 --- a/Sources/DistributedCluster/utils.swift +++ b/Sources/DistributedCluster/utils.swift @@ -203,7 +203,7 @@ func traceLog_Serialization(_ message: @autoclosure () -> String, file: String = } /// INTERNAL API: Used for easier debugging; most of those messages are meant to be eventually removed -func traceLog_Remote(_ node: UniqueNode, _ message: @autoclosure () -> String, file: String = #fileID, line: UInt = #line) { +func traceLog_Remote(_ node: Cluster.Node, _ message: @autoclosure () -> String, file: String = #fileID, line: UInt = #line) { #if SACT_TRACE_REMOTE pprint("SACT_TRACE_REMOTE [\(node)]: \(message())", file: file, line: line) #endif diff --git a/Sources/MultiNodeTestKit/MultiNodeTestConductor.swift b/Sources/MultiNodeTestKit/MultiNodeTestConductor.swift index 02456f608..34e628873 100644 --- a/Sources/MultiNodeTestKit/MultiNodeTestConductor.swift +++ b/Sources/MultiNodeTestKit/MultiNodeTestConductor.swift @@ -12,7 +12,6 @@ // //===----------------------------------------------------------------------===// -import Distributed import DistributedCluster import Logging @@ -22,7 +21,7 @@ public distributed actor MultiNodeTestConductor: ClusterSingleton, CustomStringC typealias NodeName = String let name: NodeName - var allNodes: Set + var allNodes: Set // TODO: also add readyNodes here lazy var log = Logger(actor: self) @@ -31,14 +30,14 @@ public distributed actor MultiNodeTestConductor: ClusterSingleton, CustomStringC // === Checkpoints var activeCheckPoint: MultiNode.Checkpoint? - var nodesAtCheckPoint: [String /* FIXME: should be UniqueNode*/: CheckedContinuation] - func setContinuation(node: String /* FIXME: should be UniqueNode*/, cc: CheckedContinuation) { + var nodesAtCheckPoint: [String /* FIXME: should be Cluster.Node*/: CheckedContinuation] + func setContinuation(node: String /* FIXME: should be Cluster.Node*/, cc: CheckedContinuation) { self.nodesAtCheckPoint[node] = cc } private var clusterEventsTask: Task? - public init(name: String, allNodes: Set, settings: MultiNodeTestSettings, actorSystem: ActorSystem) { + public init(name: String, allNodes: Set, settings: MultiNodeTestSettings, actorSystem: ActorSystem) { self.actorSystem = actorSystem self.settings = settings self.allNodes = allNodes @@ -74,7 +73,7 @@ extension MultiNodeTestConductor { /// Used to check if the conductor is responsive. 
public distributed func ping(message: String, from node: String) -> String { self.actorSystem.log.info("Conductor received ping: \(message) from \(node) (node.length: \(node.count))") - return "pong:\(message) (conductor node: \(self.actorSystem.cluster.uniqueNode))" + return "pong:\(message) (conductor node: \(self.actorSystem.cluster.node))" } } @@ -83,7 +82,7 @@ extension MultiNodeTestConductor { extension MultiNodeTestConductor { /// Helper function which sets a large timeout for this remote call -- the call will suspend until all nodes have arrived at the checkpoint - public nonisolated func enterCheckPoint(node: String /* FIXME: should be UniqueNode*/, + public nonisolated func enterCheckPoint(node: String /* FIXME: should be Cluster.Node*/, checkPoint: MultiNode.Checkpoint, waitTime: Duration) async throws { @@ -96,7 +95,7 @@ extension MultiNodeTestConductor { } /// Reentrant; all nodes will enter the checkpoint and eventually be resumed once all have arrived. - internal distributed func _enterCheckPoint(node: String /* FIXME: should be UniqueNode*/, + internal distributed func _enterCheckPoint(node: String /* FIXME: should be Cluster.Node*/, checkPoint: MultiNode.Checkpoint) async throws { self.actorSystem.log.warning("Conductor received `enterCheckPoint` FROM \(node) INNER RECEIVED") @@ -114,7 +113,7 @@ extension MultiNodeTestConductor { } } - func enterActiveCheckPoint(_ node: String /* FIXME: should be UniqueNode*/, checkPoint: MultiNode.Checkpoint) async throws { + func enterActiveCheckPoint(_ node: String /* FIXME: should be Cluster.Node*/, checkPoint: MultiNode.Checkpoint) async throws { guard self.nodesAtCheckPoint[node] == nil else { throw MultiNodeCheckPointError( nodeName: node, @@ -180,14 +179,14 @@ extension MultiNodeTestConductor { } var checkpointMissingNodes: Set { - var missing = Set(self.allNodes.map(\.node.systemName)) + var missing = Set(self.allNodes.map(\.endpoint.systemName)) for node in self.nodesAtCheckPoint.keys { missing.remove(node) } return missing } - func activateCheckPoint(_ node: String /* FIXME: should be UniqueNode*/, checkPoint: MultiNode.Checkpoint) async throws { + func activateCheckPoint(_ node: String /* FIXME: should be Cluster.Node*/, checkPoint: MultiNode.Checkpoint) async throws { guard self.activeCheckPoint == nil else { throw MultiNodeCheckPointError( nodeName: node, @@ -199,7 +198,7 @@ extension MultiNodeTestConductor { try await self.enterActiveCheckPoint(node, checkPoint: checkPoint) } - func enterIllegalCheckpoint(_ node: String /* FIXME: should be UniqueNode*/, + func enterIllegalCheckpoint(_ node: String /* FIXME: should be Cluster.Node*/, active activeCheckPoint: MultiNode.Checkpoint, entered enteredCheckPoint: MultiNode.Checkpoint) throws { @@ -210,7 +209,7 @@ extension MultiNodeTestConductor { } func checkpointNodeBecameDown(_ change: Cluster.MembershipChange) { - let nodeName = change.node.node.systemName + let nodeName = change.node.endpoint.systemName guard let checkpoint = self.activeCheckPoint else { return } @@ -257,11 +256,11 @@ extension MultiNodeTestConductor { case .membershipChange(let change): if change.status.isAtLeast(.down) { /// If there are nodes waiting on a checkpoint, and they became down, they will never reach the checkpoint! 
- if self.nodesAtCheckPoint.contains(where: { $0.key == change.node.node.systemName }) { + if self.nodesAtCheckPoint.contains(where: { $0.key == change.node.endpoint.systemName }) { self.checkpointNodeBecameDown(change) } - self.allNodes.remove(change.member.uniqueNode) + self.allNodes.remove(change.member.node) } default: return diff --git a/Sources/MultiNodeTestKit/MultiNodeTestKit+Control.swift b/Sources/MultiNodeTestKit/MultiNodeTestKit+Control.swift index 166104112..7bb59a02b 100644 --- a/Sources/MultiNodeTestKit/MultiNodeTestKit+Control.swift +++ b/Sources/MultiNodeTestKit/MultiNodeTestKit+Control.swift @@ -42,7 +42,7 @@ extension MultiNodeTest { /// this logger is the same as the actor system's default logger. public var log = Logger(label: "multi-node") - public var _allNodes: [String: Node] = [:] + public var _allEndpoints: [String: Cluster.Endpoint] = [:] public var _conductor: MultiNodeTestConductor? public var conductor: MultiNodeTestConductor { @@ -53,17 +53,17 @@ extension MultiNodeTest { self.system.cluster } - public var allNodes: some Collection { - self._allNodes.values + public var allNodes: some Collection { + self._allEndpoints.values } public init(nodeName: String) { self.nodeName = nodeName } - public subscript(_ nid: Nodes) -> Node { - guard let node = self._allNodes[nid.rawValue] else { - fatalError("No node present for [\(nid.rawValue)], available: \(self._allNodes) (on \(self.system))") + public subscript(_ nid: Nodes) -> Cluster.Endpoint { + guard let node = self._allEndpoints[nid.rawValue] else { + fatalError("No node present for [\(nid.rawValue)], available: \(self._allEndpoints) (on \(self.system))") } return node @@ -143,7 +143,7 @@ extension MultiNodeTest.Control { do { try await self.conductor.enterCheckPoint( - node: self.system.name, // FIXME: should be: self.system.cluster.uniqueNode, + node: self.system.name, // FIXME: should be: self.system.cluster.node, checkPoint: checkPoint, waitTime: waitTime ?? .seconds(30) ) diff --git a/Sources/MultiNodeTestKit/MultiNodeTestKit.swift b/Sources/MultiNodeTestKit/MultiNodeTestKit.swift index 777d0a131..5b5385c56 100644 --- a/Sources/MultiNodeTestKit/MultiNodeTestKit.swift +++ b/Sources/MultiNodeTestKit/MultiNodeTestKit.swift @@ -66,7 +66,14 @@ public protocol MultiNodeNodes: Hashable, CaseIterable { public protocol MultiNodeTestControlProtocol { var _actorSystem: ClusterSystem? { get set } var _conductor: MultiNodeTestConductor? { get set } - var _allNodes: [String: Node] { get set } + var _allEndpoints: [String: Cluster.Endpoint] { get set } + func _allEndpoints(except name: String) -> [Cluster.Endpoint] +} + +extension MultiNodeTestControlProtocol { + public func _allEndpoints(except nodeName: String) -> [Cluster.Endpoint] { + self._allEndpoints.values.filter { $0.systemName != nodeName } + } } public protocol MultiNodeTestSuite { diff --git a/Sources/MultiNodeTestKitRunner/boot+MultiNodeTestKitRunner+Exec.swift b/Sources/MultiNodeTestKitRunner/boot+MultiNodeTestKitRunner+Exec.swift index c08858b74..85867bc25 100644 --- a/Sources/MultiNodeTestKitRunner/boot+MultiNodeTestKitRunner+Exec.swift +++ b/Sources/MultiNodeTestKitRunner/boot+MultiNodeTestKitRunner+Exec.swift @@ -35,8 +35,8 @@ extension MultiNodeTestKitRunnerBoot { allNodes multiNodeEndpoints: [MultiNode.Endpoint]) async throws { var control = multiNodeTest.makeControl(nodeName) - control._allNodes = convertAllNodes(allNodes: multiNodeEndpoints) - let myNode = control._allNodes[nodeName]! 
// !-safe, we just prepared this node collection + control._allEndpoints = convertAllNodes(allNodes: multiNodeEndpoints) + let myNode = control._allEndpoints[nodeName]! // !-safe, we just prepared this node collection var multiNodeSettings = MultiNodeTestSettings() multiNodeTest.configureMultiNodeTest(&multiNodeSettings) @@ -84,20 +84,20 @@ extension MultiNodeTestKitRunnerBoot { // join all the other nodes print("CLUSTER JOIN ============================================".yellow) - let otherNodes = control._allNodes.values.filter { $0.systemName != nodeName } - for other in otherNodes { + let otherEndpoints = control._allEndpoints(except: nodeName) + for other in otherEndpoints { log("Prepare cluster: join [\(nodeName)] with \(other)") - actorSystem.cluster.join(node: other) + actorSystem.cluster.join(endpoint: other) } - var allNodes: Set = [actorSystem.cluster.uniqueNode] - for other in otherNodes { - let joinedOther = try await actorSystem.cluster.joined(node: other, within: multiNodeSettings.initialJoinTimeout) + var allNodes: Set = [actorSystem.cluster.node] + for other in otherEndpoints { + let joinedOther = try await actorSystem.cluster.joined(endpoint: other, within: multiNodeSettings.initialJoinTimeout) guard let joinedOther else { fatalError("[multi-node][\(nodeName)] Failed to join \(other)!") } - print("[multi-node] [\(actorSystem.cluster.uniqueNode)] <= joined => \(joinedOther)") - allNodes.insert(joinedOther.uniqueNode) + print("[multi-node] [\(actorSystem.cluster.node)] <= joined => \(joinedOther)") + allNodes.insert(joinedOther.node) } let conductorSingletonSettings = ClusterSingletonSettings() @@ -127,9 +127,9 @@ extension MultiNodeTestKitRunnerBoot { try actorSystem.shutdown() } - func convertAllNodes(allNodes: [MultiNode.Endpoint]) -> [String: Node] { + func convertAllNodes(allNodes: [MultiNode.Endpoint]) -> [String: Cluster.Endpoint] { let nodeList = allNodes.map { mn in - let n = Node(systemName: mn.name, host: mn.sactHost, port: mn.sactPort) + let n = Cluster.Endpoint(systemName: mn.name, host: mn.sactHost, port: mn.sactPort) return (n.systemName, n) } diff --git a/Tests/DistributedActorsDocumentationTests/ClusterDocExamples.swift b/Tests/DistributedActorsDocumentationTests/ClusterDocExamples.swift index 883afb6b0..0389ccf34 100644 --- a/Tests/DistributedActorsDocumentationTests/ClusterDocExamples.swift +++ b/Tests/DistributedActorsDocumentationTests/ClusterDocExamples.swift @@ -30,8 +30,8 @@ class ClusterDocExamples: XCTestCase { // system will bind by default on `localhost:7337` } - let otherNode = Node(systemName: "ClusterJoining", host: "localhost", port: 8228) - system.cluster.join(node: otherNode) // <2> + let otherNode = Endpoint(systemName: "ClusterJoining", host: "localhost", port: 8228) + system.cluster.join(endpoint: otherNode) // <2> // end::joining[] } @@ -39,7 +39,7 @@ class ClusterDocExamples: XCTestCase { func example_discovery_joining_seedNodes() { class SomeSpecificServiceDiscovery: ServiceDiscovery { typealias Service = String - typealias Instance = Node + typealias Instance = Endpoint private(set) var defaultLookupTimeout: DispatchTimeInterval = .seconds(3) @@ -87,8 +87,8 @@ class ClusterDocExamples: XCTestCase { settings.discovery = ServiceDiscoverySettings( SomeGenericServiceDiscovery( /* configuration */ ), // <1> service: "my-service", - mapInstanceToNode: { (instance: SomeGenericServiceDiscovery.Instance) -> Node in // <2> - Node(systemName: "", host: instance.host, port: instance.port) + mapInstanceToNode: { (instance: 
SomeGenericServiceDiscovery.Instance) -> Endpoint in // <2> + Endpoint(systemName: "", host: instance.host, port: instance.port) } ) } diff --git a/Tests/DistributedActorsDocumentationTests/Protobuf/SerializationDocExamples.pb.swift b/Tests/DistributedActorsDocumentationTests/Protobuf/SerializationDocExamples.pb.swift index 223b71f0a..56c080079 100644 --- a/Tests/DistributedActorsDocumentationTests/Protobuf/SerializationDocExamples.pb.swift +++ b/Tests/DistributedActorsDocumentationTests/Protobuf/SerializationDocExamples.pb.swift @@ -1,5 +1,4 @@ // DO NOT EDIT. -// swift-format-ignore-file // // Generated by the Swift generator plugin for the protocol buffer compiler. // Source: SerializationDocExamples.proto @@ -29,7 +28,7 @@ import SwiftProtobuf // If the compiler emits an error on this type, it is because this file // was generated by a version of the `protoc` Swift plug-in that is // incompatible with the version of SwiftProtobuf to which you are linking. -// Please ensure that you are building against the same version of the API +// Please ensure that your are building against the same version of the API // that was used to generate this file. fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} @@ -98,11 +97,8 @@ extension _ProtoParkingGarageStatus: SwiftProtobuf.Message, SwiftProtobuf._Messa mutating func decodeMessage(decoder: inout D) throws { while let fieldNumber = try decoder.nextFieldNumber() { - // The use of inline closures is to circumvent an issue where the compiler - // allocates stack space for every case branch when no optimizations are - // enabled. https://github.com/apple/swift-protobuf/issues/1034 switch fieldNumber { - case 1: try { try decoder.decodeSingularEnumField(value: &self.type) }() + case 1: try decoder.decodeSingularEnumField(value: &self.type) default: break } } diff --git a/Tests/DistributedClusterTests/ActorIDMetadataTests.swift b/Tests/DistributedClusterTests/ActorIDMetadataTests.swift index c6177a867..dc6e9a19c 100644 --- a/Tests/DistributedClusterTests/ActorIDMetadataTests.swift +++ b/Tests/DistributedClusterTests/ActorIDMetadataTests.swift @@ -119,7 +119,7 @@ final class ActorIDMetadataTests: ClusteredActorSystemsXCTestCase { let singleton = await ThereCanBeOnlyOneClusterSingleton(actorSystem: system) - let madeUpID = ActorID(local: system.cluster.uniqueNode, path: singleton.id.path, incarnation: .wellKnown) + let madeUpID = ActorID(local: system.cluster.node, path: singleton.id.path, incarnation: .wellKnown) madeUpID.metadata.wellKnown = singleton.id.metadata.wellKnown! 
singleton.id.shouldEqual(madeUpID) diff --git a/Tests/DistributedClusterTests/ActorIDTests.swift b/Tests/DistributedClusterTests/ActorIDTests.swift index 7c51d7f77..7c2d3802e 100644 --- a/Tests/DistributedClusterTests/ActorIDTests.swift +++ b/Tests/DistributedClusterTests/ActorIDTests.swift @@ -18,7 +18,7 @@ import XCTest final class ActorIDTests: ClusteredActorSystemsXCTestCase { func test_local_actorAddress_shouldPrintNicely() throws { - let node: UniqueNode = .init(protocol: "sact", systemName: "\(Self.self)", host: "127.0.0.1", port: 7337, nid: .random()) + let node = Cluster.Node(systemName: "\(Self.self)", host: "127.0.0.1", port: 7337, nid: .random()) let id = try ActorID(local: node, path: ActorPath._user.appending("hello"), incarnation: ActorIncarnation(8888)) "\(id)".shouldEqual("/user/hello") "\(id.name)".shouldEqual("hello") @@ -37,9 +37,9 @@ final class ActorIDTests: ClusteredActorSystemsXCTestCase { } func test_remote_actorAddress_shouldPrintNicely() throws { - let localNode: UniqueNode = .init(protocol: "sact", systemName: "\(Self.self)", host: "127.0.0.1", port: 7337, nid: .random()) + let localNode = Cluster.Node(systemName: "\(Self.self)", host: "127.0.0.1", port: 7337, nid: .random()) let id = try ActorID(local: localNode, path: ActorPath._user.appending("hello"), incarnation: ActorIncarnation(8888)) - let remoteNode = UniqueNode(systemName: "system", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)) + let remoteNode = Cluster.Node(systemName: "system", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)) let remote = ActorID(remote: remoteNode, path: id.path, incarnation: ActorIncarnation(8888)) remote.detailedDescription.shouldEqual("sact://system:11111@127.0.0.1:1234/user/hello#8888[\"$path\": /user/hello]") @@ -56,7 +56,7 @@ final class ActorIDTests: ClusteredActorSystemsXCTestCase { // MARK: Equality & Sorting func test_equalityOf_idWithSameSegmentsButDifferentIncarnation() throws { - let node: UniqueNode = .init(protocol: "sact", systemName: "\(Self.self)", host: "127.0.0.1", port: 7337, nid: .random()) + let node = Cluster.Node(systemName: "\(Self.self)", host: "127.0.0.1", port: 7337, nid: .random()) let one = try ActorPath(root: "test").makeChildPath(name: "foo").makeLocalID(on: node, incarnation: .random()) let two = try ActorPath(root: "test").makeChildPath(name: "foo").makeLocalID(on: node, incarnation: .random()) @@ -69,30 +69,30 @@ final class ActorIDTests: ClusteredActorSystemsXCTestCase { func test_equalityOf_idWithDifferentSystemNameOnly() throws { let path = try ActorPath._user.appending("hello") - let one = ActorID(local: UniqueNode(systemName: "one", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)), path: path, incarnation: ActorIncarnation(88)) - let two = ActorID(local: UniqueNode(systemName: "two", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)), path: path, incarnation: ActorIncarnation(88)) + let one = ActorID(local: Cluster.Node(systemName: "one", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)), path: path, incarnation: ActorIncarnation(88)) + let two = ActorID(local: Cluster.Node(systemName: "two", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)), path: path, incarnation: ActorIncarnation(88)) one.shouldEqual(two) } func test_equalityOf_idWithDifferentSystemNameOnly_remote() throws { let path = try ActorPath._user.appending("hello") - let one = ActorID(remote: UniqueNode(systemName: "one", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)), path: path, incarnation: 
ActorIncarnation(88)) - let two = ActorID(remote: UniqueNode(systemName: "two", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)), path: path, incarnation: ActorIncarnation(88)) + let one = ActorID(remote: Cluster.Node(systemName: "one", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)), path: path, incarnation: ActorIncarnation(88)) + let two = ActorID(remote: Cluster.Node(systemName: "two", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)), path: path, incarnation: ActorIncarnation(88)) one.shouldEqual(two) } func test_equalityOf_idWithDifferentSystemNameOnly_local_remote() throws { let path = try ActorPath._user.appending("hello") - let one = ActorID(local: UniqueNode(systemName: "one", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)), path: path, incarnation: ActorIncarnation(88)) - let two = ActorID(remote: UniqueNode(systemName: "two", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)), path: path, incarnation: ActorIncarnation(88)) + let one = ActorID(local: Cluster.Node(systemName: "one", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)), path: path, incarnation: ActorIncarnation(88)) + let two = ActorID(remote: Cluster.Node(systemName: "two", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)), path: path, incarnation: ActorIncarnation(88)) one.shouldEqual(two) } func test_equalityOf_idWithDifferentSegmentsButSameUID() throws { - let node = UniqueNode(systemName: "one", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)) + let node = Cluster.Node(systemName: "one", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)) let one = try ActorPath(root: "test").makeChildPath(name: "foo").makeLocalID(on: node, incarnation: .random()) let one2 = try ActorPath(root: "test").makeChildPath(name: "foo2").makeLocalID(on: node, incarnation: one.incarnation) @@ -100,7 +100,7 @@ final class ActorIDTests: ClusteredActorSystemsXCTestCase { } func test_sortingOf_actorIDs() throws { - let node = UniqueNode(systemName: "one", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)) + let node = Cluster.Node(systemName: "one", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)) var ids: [ActorID] = [] let a: ActorID = try ActorPath._user.appending("a").makeLocalID(on: node, incarnation: .random()) let b: ActorID = try ActorPath._user.appending("b").makeLocalID(on: node, incarnation: .random()) @@ -114,7 +114,7 @@ final class ActorIDTests: ClusteredActorSystemsXCTestCase { } func test_sortingOf_sameNode_actorIDs() throws { - let node = UniqueNode(systemName: "one", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)) + let node = Cluster.Node(systemName: "one", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)) var ids: [ActorID] = [] let a: ActorID = try ActorPath._user.appending("a").makeLocalID(on: node, incarnation: .wellKnown) let b: ActorID = try ActorPath._user.appending("b").makeLocalID(on: node, incarnation: .wellKnown) @@ -128,7 +128,7 @@ final class ActorIDTests: ClusteredActorSystemsXCTestCase { } func test_sortingOf_diffNodes_actorIDs() throws { - let node = UniqueNode(systemName: "one", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)) + let node = Cluster.Node(systemName: "one", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)) var ids: [ActorID] = [] let a: ActorID = try ActorPath._user.appending("a").makeRemoteID(on: node, incarnation: 1) let b: ActorID = try ActorPath._user.appending("a").makeRemoteID(on: node, incarnation: 1) @@ -145,7 +145,7 @@ final class ActorIDTests: 
ClusteredActorSystemsXCTestCase { // MARK: Coding func test_encodeDecode_ActorAddress_withoutSerializationContext() async throws { - let node = UniqueNode(systemName: "one", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)) + let node = Cluster.Node(systemName: "one", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)) let a = try ActorPath._user.appending("a").makeRemoteID(on: node, incarnation: 1) let addressWithoutTestTag = a a.metadata.test = "test-value" @@ -163,7 +163,7 @@ final class ActorIDTests: ClusteredActorSystemsXCTestCase { } func test_serializing_ActorAddress_skipCustomTag() async throws { - let node = UniqueNode(systemName: "one", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)) + let node = Cluster.Node(systemName: "one", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)) let a = try ActorPath._user.appending("a").makeRemoteID(on: node, incarnation: 1) a.metadata.test = "test-value" @@ -182,7 +182,7 @@ final class ActorIDTests: ClusteredActorSystemsXCTestCase { } func test_serializing_ActorAddress_propagateCustomTag() async throws { - let node = UniqueNode(systemName: "one", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111)) + let node = Cluster.Node(systemName: "one", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111)) let a = try ActorPath._user.appending("a").makeRemoteID(on: node, incarnation: 1) a.metadata.test = "test-value" diff --git a/Tests/DistributedClusterTests/ActorRefAdapterTests.swift b/Tests/DistributedClusterTests/ActorRefAdapterTests.swift index 5c7cd87e9..23b7d6dcf 100644 --- a/Tests/DistributedClusterTests/ActorRefAdapterTests.swift +++ b/Tests/DistributedClusterTests/ActorRefAdapterTests.swift @@ -46,8 +46,8 @@ class _ActorRefAdapterTests: SingleClusterSystemXCTestCase { func test_adaptedRef_overNetwork_shouldConvertMessages() async throws { let firstSystem = await setUpNode("One-RemoteActorRefAdapterTests") { settings in settings.enabled = true - settings.node.host = "127.0.0.1" - settings.node.port = 1881 + settings.endpoint.host = "127.0.0.1" + settings.endpoint.port = 1881 } let firstTestKit = self.testKit(firstSystem) let probe = firstTestKit.makeTestProbe(expecting: String.self) @@ -55,11 +55,11 @@ class _ActorRefAdapterTests: SingleClusterSystemXCTestCase { let systemTwo = await setUpNode("Two-RemoteActorRefAdapterTests") { settings in settings.enabled = true - settings.node.host = "127.0.0.1" - settings.node.port = 1991 + settings.endpoint.host = "127.0.0.1" + settings.endpoint.port = 1991 } - firstSystem.cluster.join(node: systemTwo.settings.node) + firstSystem.cluster.join(endpoint: systemTwo.settings.endpoint) sleep(2) diff --git a/Tests/DistributedClusterTests/Clocks/Protobuf/VersionVector+SerializationTests.swift b/Tests/DistributedClusterTests/Clocks/Protobuf/VersionVector+SerializationTests.swift index 27c6ec55a..d9a15a925 100644 --- a/Tests/DistributedClusterTests/Clocks/Protobuf/VersionVector+SerializationTests.swift +++ b/Tests/DistributedClusterTests/Clocks/Protobuf/VersionVector+SerializationTests.swift @@ -17,8 +17,8 @@ import DistributedActorsTestKit import XCTest final class VersionVectorSerializationTests: SingleClusterSystemXCTestCase { - var node: UniqueNode { - self.system.cluster.uniqueNode + var node: Cluster.Node { + self.system.cluster.node } lazy var idA = try! 
ActorID(local: node, path: ActorPath._user.appending("A"), incarnation: .wellKnown) diff --git a/Tests/DistributedClusterTests/Clocks/VersionVectorTests.swift b/Tests/DistributedClusterTests/Clocks/VersionVectorTests.swift index 63198a1c7..bf9b09d11 100644 --- a/Tests/DistributedClusterTests/Clocks/VersionVectorTests.swift +++ b/Tests/DistributedClusterTests/Clocks/VersionVectorTests.swift @@ -20,7 +20,7 @@ final class VersionVectorTests: XCTestCase { private typealias VV = VersionVector private typealias V = VersionVector.Version - let node: UniqueNode = .init(protocol: "sact", systemName: "Test", host: "127.0.0.1", port: 7337, nid: .random()) + let node = Cluster.Node(systemName: "Test", host: "127.0.0.1", port: 7337, nid: .random()) lazy var replicaA = ReplicaID.actorID(try! ActorPath._user.appending("A").makeLocalID(on: node, incarnation: .random())) lazy var replicaB = ReplicaID.actorID(try! ActorPath._user.appending("B").makeLocalID(on: node, incarnation: .random())) lazy var replicaC = ReplicaID.actorID(try! ActorPath._user.appending("C").makeLocalID(on: node, incarnation: .random())) diff --git a/Tests/DistributedClusterTests/Cluster/AssociationClusteredTests.swift b/Tests/DistributedClusterTests/Cluster/AssociationClusteredTests.swift index 205f3223d..8a0e8a469 100644 --- a/Tests/DistributedClusterTests/Cluster/AssociationClusteredTests.swift +++ b/Tests/DistributedClusterTests/Cluster/AssociationClusteredTests.swift @@ -31,10 +31,10 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { func test_boundServer_shouldAcceptAssociate() async throws { let (first, second) = await setUpPair() - first.cluster.join(node: second.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) - try assertAssociated(first, withExactly: second.cluster.uniqueNode) - try assertAssociated(second, withExactly: first.cluster.uniqueNode) + try assertAssociated(first, withExactly: second.cluster.node) + try assertAssociated(second, withExactly: first.cluster.node) } func test_boundServer_shouldAcceptAssociate_raceFromBothNodes() async throws { @@ -44,46 +44,46 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { let n5 = await setUpNode("node-5") let n6 = await setUpNode("node-6") - first.cluster.join(node: second.cluster.uniqueNode.node) - second.cluster.join(node: first.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) + second.cluster.join(endpoint: first.cluster.node.endpoint) - n3.cluster.join(node: first.cluster.uniqueNode.node) - first.cluster.join(node: n3.cluster.uniqueNode.node) + n3.cluster.join(endpoint: first.cluster.node.endpoint) + first.cluster.join(endpoint: n3.cluster.node.endpoint) - n4.cluster.join(node: first.cluster.uniqueNode.node) - first.cluster.join(node: n4.cluster.uniqueNode.node) + n4.cluster.join(endpoint: first.cluster.node.endpoint) + first.cluster.join(endpoint: n4.cluster.node.endpoint) - n5.cluster.join(node: first.cluster.uniqueNode.node) - first.cluster.join(node: n5.cluster.uniqueNode.node) + n5.cluster.join(endpoint: first.cluster.node.endpoint) + first.cluster.join(endpoint: n5.cluster.node.endpoint) - n6.cluster.join(node: first.cluster.uniqueNode.node) - first.cluster.join(node: n6.cluster.uniqueNode.node) + n6.cluster.join(endpoint: first.cluster.node.endpoint) + first.cluster.join(endpoint: n6.cluster.node.endpoint) - try assertAssociated(first, withAtLeast: second.cluster.uniqueNode) - try assertAssociated(second, withAtLeast: first.cluster.uniqueNode) + try 
assertAssociated(first, withAtLeast: second.cluster.node) + try assertAssociated(second, withAtLeast: first.cluster.node) } func test_handshake_shouldNotifyOnSuccess() async throws { let (first, second) = await setUpPair() - first.cluster.ref.tell(.command(.handshakeWith(second.cluster.uniqueNode.node))) + first.cluster.ref.tell(.command(.handshakeWith(second.cluster.node.endpoint))) - try assertAssociated(first, withExactly: second.cluster.uniqueNode) - try assertAssociated(second, withExactly: first.cluster.uniqueNode) + try assertAssociated(first, withExactly: second.cluster.node) + try assertAssociated(second, withExactly: first.cluster.node) } func test_handshake_shouldNotifySuccessWhenAlreadyConnected() async throws { let (first, second) = await setUpPair() - first.cluster.ref.tell(.command(.handshakeWith(second.cluster.uniqueNode.node))) + first.cluster.ref.tell(.command(.handshakeWith(second.cluster.node.endpoint))) - try assertAssociated(first, withExactly: second.cluster.uniqueNode) - try assertAssociated(second, withExactly: first.cluster.uniqueNode) + try assertAssociated(first, withExactly: second.cluster.node) + try assertAssociated(second, withExactly: first.cluster.node) - first.cluster.ref.tell(.command(.handshakeWith(second.cluster.uniqueNode.node))) + first.cluster.ref.tell(.command(.handshakeWith(second.cluster.node.endpoint))) - try assertAssociated(first, withExactly: second.cluster.uniqueNode) - try assertAssociated(second, withExactly: first.cluster.uniqueNode) + try assertAssociated(first, withExactly: second.cluster.node) + try assertAssociated(second, withExactly: first.cluster.node) } // ==== ------------------------------------------------------------------------------------------------------------ @@ -92,18 +92,18 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { func test_association_sameAddressNodeJoin_shouldOverrideExistingNode() async throws { let (first, second) = await setUpPair() - let secondName = second.cluster.uniqueNode.node.systemName - let secondPort = second.cluster.uniqueNode.port + let secondName = second.cluster.node.endpoint.systemName + let secondPort = second.cluster.node.port let firstEventsProbe = self.testKit(first).makeTestProbe(expecting: Cluster.Event.self) let secondEventsProbe = self.testKit(second).makeTestProbe(expecting: Cluster.Event.self) await first.cluster.events._subscribe(firstEventsProbe.ref) await second.cluster.events._subscribe(secondEventsProbe.ref) - first.cluster.join(node: second.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) - try assertAssociated(first, withExactly: second.cluster.uniqueNode) - try assertAssociated(second, withExactly: first.cluster.uniqueNode) + try assertAssociated(first, withExactly: second.cluster.node) + try assertAssociated(second, withExactly: first.cluster.node) let oldSecond = second let shutdown = try oldSecond.shutdown() // kill second node @@ -118,11 +118,11 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { // the new replacement node is now going to initiate a handshake with 'first' which knew about the previous // instance (oldSecond) on the same node; It should accept this new handshake, and ban the previous node. 
- secondReplacement.cluster.join(node: first.cluster.uniqueNode.node) + secondReplacement.cluster.join(endpoint: first.cluster.node.endpoint) // verify we are associated ONLY with the appropriate nodes now; - try assertAssociated(first, withExactly: [secondReplacement.cluster.uniqueNode]) - try assertAssociated(secondReplacement, withExactly: [first.cluster.uniqueNode]) + try assertAssociated(first, withExactly: [secondReplacement.cluster.node]) + try assertAssociated(secondReplacement, withExactly: [first.cluster.node]) } func test_association_shouldAllowSendingToSecondReference() async throws { @@ -137,12 +137,12 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { } ) - first.cluster.join(node: second.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) - try assertAssociated(first, withExactly: second.settings.uniqueBindNode) + try assertAssociated(first, withExactly: second.settings.bindNode) // first we manually construct the "right second path"; Don't do this in normal production code - let uniqueSecondAddress = ActorID(local: second.cluster.uniqueNode, path: refOnSecondSystem.path, incarnation: refOnSecondSystem.id.incarnation) + let uniqueSecondAddress = ActorID(local: second.cluster.node, path: refOnSecondSystem.path, incarnation: refOnSecondSystem.id.incarnation) // to then obtain a second ref ON the `system`, meaning that the node within uniqueSecondAddress is a second one let resolvedRef = self.resolveRef(first, type: String.self, id: uniqueSecondAddress, on: second) // the resolved ref is a first resource on the `system` and points via the right association to the second actor @@ -155,7 +155,7 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { func test_ignore_attemptToSelfJoinANode() async throws { let alone = await setUpNode("alone") - alone.cluster.join(node: alone.cluster.uniqueNode.node) // "self join", should simply be ignored + alone.cluster.join(endpoint: alone.cluster.node.endpoint) // "self join", should simply be ignored let testKit = self.testKit(alone) try await testKit.eventually(within: .seconds(3)) { @@ -174,22 +174,22 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { // here we attempt to make a race where the nodes race to join each other // again, only one association should be created. - first.cluster.ref.tell(.command(.handshakeWith(second.cluster.uniqueNode.node))) - second.cluster.ref.tell(.command(.handshakeWith(first.cluster.uniqueNode.node))) + first.cluster.ref.tell(.command(.handshakeWith(second.cluster.node.endpoint))) + second.cluster.ref.tell(.command(.handshakeWith(first.cluster.node.endpoint))) - try assertAssociated(first, withExactly: second.settings.uniqueBindNode) - try assertAssociated(second, withExactly: first.settings.uniqueBindNode) + try assertAssociated(first, withExactly: second.settings.bindNode) + try assertAssociated(second, withExactly: first.settings.bindNode) } func test_association_shouldEstablishSingleAssociationForConcurrentlyInitiatedHandshakes_outgoing_outgoing() async throws { let (first, second) = await setUpPair() // we issue two handshakes quickly after each other, both should succeed but there should only be one association established (!) 
- first.cluster.ref.tell(.command(.handshakeWith(second.cluster.uniqueNode.node))) - first.cluster.ref.tell(.command(.handshakeWith(second.cluster.uniqueNode.node))) + first.cluster.ref.tell(.command(.handshakeWith(second.cluster.node.endpoint))) + first.cluster.ref.tell(.command(.handshakeWith(second.cluster.node.endpoint))) - try assertAssociated(first, withExactly: second.settings.uniqueBindNode) - try assertAssociated(second, withExactly: first.settings.uniqueBindNode) + try assertAssociated(first, withExactly: second.settings.bindNode) + try assertAssociated(second, withExactly: first.settings.bindNode) } // ==== ------------------------------------------------------------------------------------------------------------ @@ -198,19 +198,19 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { func test_handshake_shouldKeepTryingUntilOtherNodeBindsPort() async throws { let first = await setUpNode("first") - let secondPort = first.cluster.uniqueNode.node.port + 10 + let secondPort = first.cluster.node.endpoint.port + 10 // second is NOT started, but we already ask first to handshake with the second one (which will fail, though the node should keep trying) - let secondNode = Node(systemName: "second", host: "127.0.0.1", port: secondPort) + let secondEndpoint = Cluster.Endpoint(systemName: "second", host: "127.0.0.1", port: secondPort) - first.cluster.join(node: secondNode) + first.cluster.join(endpoint: secondEndpoint) sleep(3) // we give it some time to keep failing to connect, so the second node is not yet started let second = await setUpNode("second") { settings in settings.bindPort = secondPort } - try assertAssociated(first, withExactly: second.cluster.uniqueNode) - try assertAssociated(second, withExactly: first.cluster.uniqueNode) + try assertAssociated(first, withExactly: second.cluster.node) + try assertAssociated(second, withExactly: first.cluster.node) } func test_handshake_shouldStopTryingWhenMaxAttemptsExceeded() async throws { @@ -221,11 +221,11 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { ) } - let secondPort = first.cluster.uniqueNode.node.port + 10 + let secondPort = first.cluster.node.endpoint.port + 10 // second is NOT started, but we already ask first to handshake with the second one (which will fail, though the node should keep trying) - let secondNode = Node(systemName: "second", host: "127.0.0.1", port: secondPort) + let secondEndpoint = Cluster.Endpoint(systemName: "second", host: "127.0.0.1", port: secondPort) - first.cluster.join(node: secondNode) + first.cluster.join(endpoint: secondEndpoint) sleep(1) // we give it some time to keep failing to connect (and exhaust the retries) let logs = self.capturedLogs(of: first) @@ -238,10 +238,10 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { } let second = await setUpNode("second") - first.cluster.join(node: second.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) - try assertNotAssociated(system: first, node: second.cluster.uniqueNode) - try assertNotAssociated(system: second, node: first.cluster.uniqueNode) + try assertNotAssociated(system: first, node: second.cluster.node) + try assertNotAssociated(system: second, node: first.cluster.node) } func test_handshake_shouldNotifyOnRejection() async throws { @@ -250,10 +250,10 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { } let second = await setUpNode("second") - 
first.cluster.ref.tell(.command(.handshakeWith(second.cluster.uniqueNode.node))) + first.cluster.ref.tell(.command(.handshakeWith(second.cluster.node.endpoint))) - try assertNotAssociated(system: first, node: second.cluster.uniqueNode) - try assertNotAssociated(system: second, node: first.cluster.uniqueNode) + try assertNotAssociated(system: first, node: second.cluster.node) + try assertNotAssociated(system: second, node: first.cluster.node) try self.capturedLogs(of: first) .awaitLogContaining( @@ -271,27 +271,27 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { } let second = await setUpNode("second") - first.cluster.down(node: first.cluster.uniqueNode.node) + first.cluster.down(endpoint: first.cluster.node.endpoint) let testKit = self.testKit(first) try await testKit.eventually(within: .seconds(3)) { let snapshot: Cluster.Membership = await first.cluster.membershipSnapshot - if let selfMember = snapshot.uniqueMember(first.cluster.uniqueNode) { + if let selfMember = snapshot.uniqueMember(first.cluster.node) { if selfMember.status == .down { () // good } else { - throw testKit.error("Expecting \(first.cluster.uniqueNode) to become [.down] but was \(selfMember.status). Membership: \(pretty: snapshot)") + throw testKit.error("Expecting \(first.cluster.node) to become [.down] but was \(selfMember.status). Membership: \(pretty: snapshot)") } } else { - throw testKit.error("No self member for \(first.cluster.uniqueNode)! Membership: \(pretty: snapshot)") + throw testKit.error("No self member for \(first.cluster.node)! Membership: \(pretty: snapshot)") } } // now we try to join the "already down" node; it should reject any such attempts - second.cluster.ref.tell(.command(.handshakeWith(first.cluster.uniqueNode.node))) + second.cluster.ref.tell(.command(.handshakeWith(first.cluster.node.endpoint))) - try assertNotAssociated(system: first, node: second.cluster.uniqueNode) - try assertNotAssociated(system: second, node: first.cluster.uniqueNode) + try assertNotAssociated(system: first, node: second.cluster.node) + try assertNotAssociated(system: second, node: first.cluster.node) try self.capturedLogs(of: second) .awaitLogContaining( @@ -305,17 +305,17 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { func test_cachedSecondControlsWithSameNodeID_shouldNotOverwriteEachOther() async throws { let (first, second) = await setUpPair() - second.cluster.join(node: first.cluster.uniqueNode.node) + second.cluster.join(endpoint: first.cluster.node.endpoint) - try assertAssociated(first, withExactly: second.cluster.uniqueNode) + try assertAssociated(first, withExactly: second.cluster.node) let thirdSystem = await setUpNode("third") { settings in settings.nid = second.settings.nid - settings.node.port = 9119 + settings.endpoint.port = 9119 } - thirdSystem.cluster.join(node: first.cluster.uniqueNode.node) - try assertAssociated(first, withExactly: [second.cluster.uniqueNode, thirdSystem.settings.uniqueBindNode]) + thirdSystem.cluster.join(endpoint: first.cluster.node.endpoint) + try assertAssociated(first, withExactly: [second.cluster.node, thirdSystem.settings.bindNode]) first._cluster?._testingOnly_associations.count.shouldEqual(2) } @@ -330,7 +330,7 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { p2.tell("Got:\(message)") return .same }) - let secondFullAddress = ActorID(remote: second.cluster.uniqueNode, path: secondOne.path, incarnation: secondOne.id.incarnation) + let secondFullAddress = ActorID(remote: second.cluster.node, path: 
secondOne.path, incarnation: secondOne.id.incarnation) // we somehow obtained a ref to secondOne (on second node) without associating second yet // e.g. another node sent us that ref; This must cause buffering of sends to second and an association to be created. @@ -338,8 +338,8 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { let resolveContext = _ResolveContext(id: secondFullAddress, system: first) let ref = first._resolve(context: resolveContext) - try assertNotAssociated(system: first, node: second.cluster.uniqueNode) - try assertNotAssociated(system: second, node: first.cluster.uniqueNode) + try assertNotAssociated(system: first, node: second.cluster.node) + try assertNotAssociated(system: second, node: first.cluster.node) // will be buffered until associated, and then delivered: ref.tell("Hello 1") @@ -350,8 +350,8 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { try p2.expectMessage("Got:Hello 2") try p2.expectMessage("Got:Hello 3") - try assertAssociated(first, withExactly: second.cluster.uniqueNode) - try assertAssociated(second, withExactly: first.cluster.uniqueNode) + try assertAssociated(first, withExactly: second.cluster.node) + try assertAssociated(second, withExactly: first.cluster.node) } // ==== ------------------------------------------------------------------------------------------------------------ @@ -362,11 +362,11 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { settings.onDownAction = .none // as otherwise we can't inspect if we really changed the status to .down, as we might shutdown too quickly :-) } - second.cluster.join(node: first.cluster.uniqueNode.node) - try assertAssociated(first, withExactly: second.cluster.uniqueNode) + second.cluster.join(endpoint: first.cluster.node.endpoint) + try assertAssociated(first, withExactly: second.cluster.node) // down myself - first.cluster.down(node: first.cluster.uniqueNode.node) + first.cluster.down(endpoint: first.cluster.node.endpoint) let firstProbe = self.testKit(first).makeTestProbe(expecting: Cluster.Membership.self) let secondProbe = self.testKit(second).makeTestProbe(expecting: Cluster.Membership.self) @@ -376,14 +376,14 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { first.cluster.ref.tell(.query(.currentMembership(firstProbe.ref))) let firstMembership = try firstProbe.expectMessage() - guard let selfMember = firstMembership.uniqueMember(first.cluster.uniqueNode) else { - throw self.testKit(second).error("No self member in membership! Wanted: \(first.cluster.uniqueNode)", line: #line - 1) + guard let selfMember = firstMembership.uniqueMember(first.cluster.node) else { + throw self.testKit(second).error("No self member in membership! 
Wanted: \(first.cluster.node)", line: #line - 1) } guard selfMember.status == .down else { throw self.testKit(first).error("Wanted self member to be DOWN, but was: \(selfMember)", line: #line - 1) } } - try await self.assertMemberStatus(on: first, node: first.cluster.uniqueNode, is: .down, within: .seconds(3)) + try await self.assertMemberStatus(on: first, node: first.cluster.node, is: .down, within: .seconds(3)) // and the second node should also notice try self.testKit(second).eventually(within: .seconds(3)) { @@ -394,12 +394,12 @@ final class ClusterAssociationTests: ClusteredActorSystemsXCTestCase { // although this may be a best effort since the first can just shut down if it wanted to, // this scenario assumes a graceful leave though: - guard let firstMemberObservedOnSecond = secondMembership.uniqueMember(first.cluster.uniqueNode) else { - throw self.testKit(second).error("\(second) does not know about the \(first.cluster.uniqueNode) at all...!", line: #line - 1) + guard let firstMemberObservedOnSecond = secondMembership.uniqueMember(first.cluster.node) else { + throw self.testKit(second).error("\(second) does not know about the \(first.cluster.node) at all...!", line: #line - 1) } guard firstMemberObservedOnSecond.status == .down else { - throw self.testKit(second).error("Wanted to see \(first.cluster.uniqueNode) as DOWN on \(second), but was still: \(firstMemberObservedOnSecond)", line: #line - 1) + throw self.testKit(second).error("Wanted to see \(first.cluster.node) as DOWN on \(second), but was still: \(firstMemberObservedOnSecond)", line: #line - 1) } } } diff --git a/Tests/DistributedClusterTests/Cluster/ClusterDiscoveryTests.swift b/Tests/DistributedClusterTests/Cluster/ClusterDiscoveryTests.swift index b511a34b9..70fcc9a00 100644 --- a/Tests/DistributedClusterTests/Cluster/ClusterDiscoveryTests.swift +++ b/Tests/DistributedClusterTests/Cluster/ClusterDiscoveryTests.swift @@ -20,11 +20,11 @@ import ServiceDiscovery import XCTest final class ClusterDiscoveryTests: SingleClusterSystemXCTestCase { - let A = Cluster.Member(node: UniqueNode(node: Node(systemName: "A", host: "1.1.1.1", port: 7337), nid: .random()), status: .up) - let B = Cluster.Member(node: UniqueNode(node: Node(systemName: "B", host: "2.2.2.2", port: 8228), nid: .random()), status: .up) + let A = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "A", host: "1.1.1.1", port: 7337), nid: .random()), status: .up) + let B = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "B", host: "2.2.2.2", port: 8228), nid: .random()), status: .up) func test_discovery_shouldInitiateJoinsToNewlyDiscoveredNodes() throws { - let discovery = TestTriggeredServiceDiscovery() + let discovery = TestTriggeredServiceDiscovery() let settings = ServiceDiscoverySettings(discovery, service: "example") let clusterProbe = testKit.makeTestProbe(expecting: ClusterShell.Message.self) _ = try system._spawn("discovery", DiscoveryShell(settings: settings, cluster: clusterProbe.ref).behavior) @@ -32,39 +32,39 @@ final class ClusterDiscoveryTests: SingleClusterSystemXCTestCase { discovery.subscribed.wait() // [A], join A - discovery.sendNext(.success([self.A.uniqueNode.node])) + discovery.sendNext(.success([self.A.node.endpoint])) guard case .command(.handshakeWith(let node1)) = try clusterProbe.expectMessage() else { throw testKit.fail(line: #line - 1) } - node1.shouldEqual(self.A.uniqueNode.node) + node1.shouldEqual(self.A.node.endpoint) // [A, B], join B - 
discovery.sendNext(.success([self.A.uniqueNode.node, self.B.uniqueNode.node])) + discovery.sendNext(.success([self.A.node.endpoint, self.B.node.endpoint])) guard case .command(.handshakeWith(let node2)) = try clusterProbe.expectMessage() else { throw testKit.fail(line: #line - 1) } - node2.shouldEqual(self.B.uniqueNode.node) + node2.shouldEqual(self.B.node.endpoint) try clusterProbe.expectNoMessage(for: .milliseconds(300)) // i.e. it should not send another join for `A` we already did that // sending another join for A would be harmless in general, but let's avoid causing more work for the system? // [A, B]; should not really emit like this but even if it did, no reason to issue more joins - discovery.sendNext(.success([self.A.uniqueNode.node, self.B.uniqueNode.node])) + discovery.sendNext(.success([self.A.node.endpoint, self.B.node.endpoint])) try clusterProbe.expectNoMessage(for: .milliseconds(200)) // [A], removals do not cause removals / downs, one could do this via a downing provider if one wanted to - discovery.sendNext(.success([self.A.uniqueNode.node])) + discovery.sendNext(.success([self.A.node.endpoint])) try clusterProbe.expectNoMessage(for: .milliseconds(200)) // [A, B], B is back, this could mean it's a "new" B, so let's issue a join just to be sure. - discovery.sendNext(.success([self.A.uniqueNode.node, self.B.uniqueNode.node])) + discovery.sendNext(.success([self.A.node.endpoint, self.B.node.endpoint])) guard case .command(.handshakeWith(let node3)) = try clusterProbe.expectMessage() else { throw testKit.fail(line: #line - 1) } - node3.shouldEqual(self.B.uniqueNode.node) + node3.shouldEqual(self.B.node.endpoint) } func test_discovery_shouldInitiateJoinsToStaticNodes() throws { - let nodes = Set([self.A, self.B].map(\.uniqueNode.node)) + let nodes = Set([self.A, self.B].map(\.node.endpoint)) let settings = ServiceDiscoverySettings(static: Set(nodes)) let clusterProbe = testKit.makeTestProbe(expecting: ClusterShell.Message.self) _ = try system._spawn("discovery", DiscoveryShell(settings: settings, cluster: clusterProbe.ref).behavior) @@ -82,14 +82,14 @@ final class ClusterDiscoveryTests: SingleClusterSystemXCTestCase { let name: String } struct ExampleK8sInstance: Hashable { - let node: Node + let endpoint: Cluster.Endpoint } let discovery = TestTriggeredServiceDiscovery() let settings = ServiceDiscoverySettings( discovery, service: ExampleK8sService(name: "example"), - mapInstanceToNode: { instance in instance.node } + mapInstanceToNode: { instance in instance.endpoint } ) let clusterProbe = testKit.makeTestProbe(expecting: ClusterShell.Message.self) _ = try system._spawn("discovery", DiscoveryShell(settings: settings, cluster: clusterProbe.ref).behavior) @@ -97,23 +97,23 @@ final class ClusterDiscoveryTests: SingleClusterSystemXCTestCase { discovery.subscribed.wait() // [A], join A - discovery.sendNext(.success([ExampleK8sInstance(node: self.A.uniqueNode.node)])) + discovery.sendNext(.success([ExampleK8sInstance(endpoint: self.A.node.endpoint)])) guard case .command(.handshakeWith(let node1)) = try clusterProbe.expectMessage() else { throw testKit.fail(line: #line - 1) } - node1.shouldEqual(self.A.uniqueNode.node) + node1.shouldEqual(self.A.node.endpoint) // [A, B], join B - discovery.sendNext(.success([ExampleK8sInstance(node: self.A.uniqueNode.node), ExampleK8sInstance(node: self.B.uniqueNode.node)])) + discovery.sendNext(.success([ExampleK8sInstance(endpoint: self.A.node.endpoint), ExampleK8sInstance(endpoint: self.B.node.endpoint)])) guard case 
.command(.handshakeWith(let node2)) = try clusterProbe.expectMessage() else { throw testKit.fail(line: #line - 1) } - node2.shouldEqual(self.B.uniqueNode.node) + node2.shouldEqual(self.B.node.endpoint) try clusterProbe.expectNoMessage(for: .milliseconds(300)) // i.e. it should not send another join for `A` we already did that } func test_discovery_stoppingActor_shouldCancelSubscription() throws { - let discovery = TestTriggeredServiceDiscovery() + let discovery = TestTriggeredServiceDiscovery() let settings = ServiceDiscoverySettings(discovery, service: "example") let clusterProbe = testKit.makeTestProbe(expecting: ClusterShell.Message.self) let ref = try system._spawn("discovery", DiscoveryShell(settings: settings, cluster: clusterProbe.ref).behavior) @@ -121,11 +121,11 @@ final class ClusterDiscoveryTests: SingleClusterSystemXCTestCase { discovery.subscribed.wait() // [A], join A - discovery.sendNext(.success([self.A.uniqueNode.node])) + discovery.sendNext(.success([self.A.node.endpoint])) guard case .command(.handshakeWith(let node1)) = try clusterProbe.expectMessage() else { throw testKit.fail(line: #line - 1) } - node1.shouldEqual(self.A.uniqueNode.node) + node1.shouldEqual(self.A.node.endpoint) ref._sendSystemMessage(.stop) _ = discovery.cancelled.wait(atMost: .seconds(3)) diff --git a/Tests/DistributedClusterTests/Cluster/ClusterEventStreamTests.swift b/Tests/DistributedClusterTests/Cluster/ClusterEventStreamTests.swift index d199f57aa..239de5501 100644 --- a/Tests/DistributedClusterTests/Cluster/ClusterEventStreamTests.swift +++ b/Tests/DistributedClusterTests/Cluster/ClusterEventStreamTests.swift @@ -18,8 +18,8 @@ import NIO import XCTest final class ClusterEventStreamTests: SingleClusterSystemXCTestCase, @unchecked Sendable { - let memberA = Cluster.Member(node: UniqueNode(node: Node(systemName: "System", host: "1.1.1.1", port: 7337), nid: .random()), status: .up) - let memberB = Cluster.Member(node: UniqueNode(node: Node(systemName: "System", host: "2.2.2.2", port: 8228), nid: .random()), status: .up) + let memberA = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "System", host: "1.1.1.1", port: 7337), nid: .random()), status: .up) + let memberB = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "System", host: "2.2.2.2", port: 8228), nid: .random()), status: .up) func test_clusterEventStream_shouldNotCauseDeadLettersOnLocalOnlySystem() throws { _ = try self.system._spawn("anything", of: String.self, .setup { context in @@ -52,13 +52,13 @@ final class ClusterEventStreamTests: SingleClusterSystemXCTestCase, @unchecked S } switch try p1.expectMessage() { case .membershipChange(let change): - change.node.shouldEqual(self.memberA.uniqueNode) + change.node.shouldEqual(self.memberA.node) default: throw p1.error("Expected a membershipChange") } switch try p1.expectMessage() { case .membershipChange(let change): - change.node.shouldEqual(self.memberB.uniqueNode) + change.node.shouldEqual(self.memberB.node) default: throw p1.error("Expected a membershipChange") } @@ -67,14 +67,14 @@ final class ClusterEventStreamTests: SingleClusterSystemXCTestCase, @unchecked S switch try p2.expectMessage() { case .snapshot(let snapshot): - snapshot.uniqueMember(self.memberA.uniqueNode).shouldEqual(self.memberA) + snapshot.uniqueMember(self.memberA.node).shouldEqual(self.memberA) () // ok default: throw p2.error("Expected a snapshot first") } switch try p2.expectMessage() { case .membershipChange(let change): - 
change.node.shouldEqual(self.memberB.uniqueNode) + change.node.shouldEqual(self.memberB.node) default: throw p2.error("Expected a membershipChange") } diff --git a/Tests/DistributedClusterTests/Cluster/ClusterLeaderActionsClusteredTests.swift b/Tests/DistributedClusterTests/Cluster/ClusterLeaderActionsClusteredTests.swift index 8a5212785..8d8eba859 100644 --- a/Tests/DistributedClusterTests/Cluster/ClusterLeaderActionsClusteredTests.swift +++ b/Tests/DistributedClusterTests/Cluster/ClusterLeaderActionsClusteredTests.swift @@ -26,7 +26,7 @@ final class ClusterLeaderActionsClusteredTests: ClusteredActorSystemsXCTestCase throw XCTSkip("!!! Skipping known flaky test \(#function) !!!") // FIXME(distributed): revisit and fix https://github.com/apple/swift-distributed-actors/issues/945 let first = await setUpNode("first") { settings in - settings.node.port = 7111 + settings.endpoint.port = 7111 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 1) } @@ -52,9 +52,9 @@ final class ClusterLeaderActionsClusteredTests: ClusteredActorSystemsXCTestCase switch try p.expectMessage() { case .leadershipChange(let change): guard let leader = change.newLeader else { - throw self.testKit(first).fail("Expected \(first.cluster.uniqueNode) to be leader") + throw self.testKit(first).fail("Expected \(first.cluster.node) to be leader") } - leader.uniqueNode.shouldEqual(first.cluster.uniqueNode) + leader.node.shouldEqual(first.cluster.node) default: throw self.testKit(first).fail("Expected leader change event") } @@ -62,36 +62,36 @@ final class ClusterLeaderActionsClusteredTests: ClusteredActorSystemsXCTestCase func test_joining_to_up_decisionByLeader() async throws { let first = await setUpNode("first") { settings in - settings.node.port = 7111 + settings.endpoint.port = 7111 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) } let second = await setUpNode("second") { settings in - settings.node.port = 8222 + settings.endpoint.port = 8222 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) } let third = await setUpNode("third") { settings in - settings.node.port = 9333 + settings.endpoint.port = 9333 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) } - first.cluster.join(node: second.cluster.uniqueNode.node) - third.cluster.join(node: second.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) + third.cluster.join(endpoint: second.cluster.node.endpoint) - try assertAssociated(first, withAtLeast: second.cluster.uniqueNode) - try assertAssociated(second, withAtLeast: third.cluster.uniqueNode) - try assertAssociated(first, withAtLeast: third.cluster.uniqueNode) + try assertAssociated(first, withAtLeast: second.cluster.node) + try assertAssociated(second, withAtLeast: third.cluster.node) + try assertAssociated(first, withAtLeast: third.cluster.node) - try await self.assertMemberStatus(on: first, node: first.cluster.uniqueNode, is: .up, within: .seconds(10)) - try await self.assertMemberStatus(on: first, node: second.cluster.uniqueNode, is: .up, within: .seconds(10)) - try await self.assertMemberStatus(on: first, node: third.cluster.uniqueNode, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: first, node: first.cluster.node, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: first, node: second.cluster.node, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: first, node: third.cluster.node, is: .up, within: .seconds(10)) - try await 
self.assertMemberStatus(on: second, node: first.cluster.uniqueNode, is: .up, within: .seconds(10)) - try await self.assertMemberStatus(on: second, node: second.cluster.uniqueNode, is: .up, within: .seconds(10)) - try await self.assertMemberStatus(on: second, node: third.cluster.uniqueNode, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: second, node: first.cluster.node, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: second, node: second.cluster.node, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: second, node: third.cluster.node, is: .up, within: .seconds(10)) - try await self.assertMemberStatus(on: third, node: first.cluster.uniqueNode, is: .up, within: .seconds(10)) - try await self.assertMemberStatus(on: third, node: second.cluster.uniqueNode, is: .up, within: .seconds(10)) - try await self.assertMemberStatus(on: third, node: third.cluster.uniqueNode, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: third, node: first.cluster.node, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: third, node: second.cluster.node, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: third, node: third.cluster.node, is: .up, within: .seconds(10)) } func test_joining_to_up_earlyYetStillLettingAllNodesKnowAboutLatestMembershipStatus() async throws { @@ -123,17 +123,17 @@ final class ClusterLeaderActionsClusteredTests: ClusteredActorSystemsXCTestCase settings.autoLeaderElection = .none // even without election running, it will be notified by things by the others } - first.cluster.join(node: second.cluster.uniqueNode.node) - third.cluster.join(node: second.cluster.uniqueNode.node) - try await self.ensureNodes(.up, within: .seconds(10), nodes: first.cluster.uniqueNode, second.cluster.uniqueNode, third.cluster.uniqueNode) + first.cluster.join(endpoint: second.cluster.node.endpoint) + third.cluster.join(endpoint: second.cluster.node.endpoint) + try await self.ensureNodes(.up, within: .seconds(10), nodes: first.cluster.node, second.cluster.node, third.cluster.node) // Even the fourth node now could join and be notified about all the existing up members. // It does not even have to run any leadership election -- there are leaders in the cluster. 
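// A short sketch of the pattern this comment describes, restricted to calls that appear in
// this patch; `existing` and `joining` stand in for two already-running ClusterSystems:
// joining one known endpoint suffices, and gossip then informs the newcomer about every member.
import DistributedCluster

func joinViaSingleEndpointSketch(existing: ClusterSystem, joining: ClusterSystem) async {
    joining.cluster.join(endpoint: existing.cluster.node.endpoint)
    // ... after convergence the snapshot reflects all members, not only `existing`:
    let snapshot = await joining.cluster.membershipSnapshot
    let upNodes = snapshot.members(atLeast: .up).map(\.node)
    _ = upNodes
}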
// // We only join one arbitrary node, we will be notified about all nodes: - fourth.cluster.join(node: third.cluster.uniqueNode.node) + fourth.cluster.join(endpoint: third.cluster.node.endpoint) - try await self.ensureNodes(.up, within: .seconds(10), nodes: first.cluster.uniqueNode, second.cluster.uniqueNode, third.cluster.uniqueNode, fourth.cluster.uniqueNode) + try await self.ensureNodes(.up, within: .seconds(10), nodes: first.cluster.node, second.cluster.node, third.cluster.node, fourth.cluster.node) } func test_up_ensureAllSubscribersGetMovingUpEvents() async throws { @@ -156,10 +156,10 @@ final class ClusterLeaderActionsClusteredTests: ClusteredActorSystemsXCTestCase let p2 = self.testKit(second).makeTestProbe(expecting: Cluster.Event.self) await second.cluster.events._subscribe(p2.ref) - first.cluster.join(node: second.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) // this ensures that the membership, as seen in ClusterShell converged on all members being up - try await self.ensureNodes(.up, nodes: first.cluster.uniqueNode, second.cluster.uniqueNode) + try await self.ensureNodes(.up, nodes: first.cluster.node, second.cluster.node) // the following tests confirm that the manually subscribed actors, got all the events they expected func assertExpectedClusterEvents(events: [Cluster.Event], probe: ActorTestProbe) throws { // the specific type of snapshot we get is slightly racy: it could be .empty or contain already the node itself @@ -178,7 +178,7 @@ final class ClusterLeaderActionsClusteredTests: ClusteredActorSystemsXCTestCase }.count.shouldEqual(2) // both nodes moved to up // the leader is the right node - events.shouldContain(.leadershipChange(Cluster.LeadershipChange(oldLeader: nil, newLeader: .init(node: first.cluster.uniqueNode, status: .joining))!)) // !-safe, since new/old leader known to be different + events.shouldContain(.leadershipChange(Cluster.LeadershipChange(oldLeader: nil, newLeader: .init(node: first.cluster.node, status: .joining))!)) // !-safe, since new/old leader known to be different } // collect all events until we see leadership change; we should already have seen members become up then @@ -239,10 +239,10 @@ final class ClusterLeaderActionsClusteredTests: ClusteredActorSystemsXCTestCase try await self.joinNodes(node: second, with: third) try await self.joinNodes(node: first, with: third) - let secondNode = second.cluster.uniqueNode - try await self.ensureNodes(.up, nodes: first.cluster.uniqueNode, secondNode, third.cluster.uniqueNode) + let secondNode = second.cluster.node + try await self.ensureNodes(.up, nodes: first.cluster.node, secondNode, third.cluster.node) - first.cluster.down(node: secondNode.node) + first.cluster.down(endpoint: secondNode.endpoint) // other nodes have observed it down try await self.ensureNodes(atLeast: .down, on: first, nodes: secondNode) @@ -269,11 +269,11 @@ final class ClusterLeaderActionsClusteredTests: ClusteredActorSystemsXCTestCase // snapshot(first joining) // are both legal eventsOnFirstSub.shouldContain(.membershipChange(.init(node: secondNode, previousStatus: nil, toStatus: .joining))) - eventsOnFirstSub.shouldContain(.membershipChange(.init(node: first.cluster.uniqueNode, previousStatus: .joining, toStatus: .up))) + eventsOnFirstSub.shouldContain(.membershipChange(.init(node: first.cluster.node, previousStatus: .joining, toStatus: .up))) eventsOnFirstSub.shouldContain(.membershipChange(.init(node: secondNode, previousStatus: .joining, toStatus: .up))) - 
eventsOnFirstSub.shouldContain(.leadershipChange(Cluster.LeadershipChange(oldLeader: nil, newLeader: .init(node: first.cluster.uniqueNode, status: .joining))!)) // !-safe, since new/old leader known to be different - eventsOnFirstSub.shouldContain(.membershipChange(.init(node: third.cluster.uniqueNode, previousStatus: nil, toStatus: .joining))) - eventsOnFirstSub.shouldContain(.membershipChange(.init(node: third.cluster.uniqueNode, previousStatus: .joining, toStatus: .up))) + eventsOnFirstSub.shouldContain(.leadershipChange(Cluster.LeadershipChange(oldLeader: nil, newLeader: .init(node: first.cluster.node, status: .joining))!)) // !-safe, since new/old leader known to be different + eventsOnFirstSub.shouldContain(.membershipChange(.init(node: third.cluster.node, previousStatus: nil, toStatus: .joining))) + eventsOnFirstSub.shouldContain(.membershipChange(.init(node: third.cluster.node, previousStatus: .joining, toStatus: .up))) eventsOnFirstSub.shouldContain(.membershipChange(.init(node: secondNode, previousStatus: .up, toStatus: .down))) @@ -315,28 +315,28 @@ final class ClusterLeaderActionsClusteredTests: ClusteredActorSystemsXCTestCase try await self.joinNodes(node: second, with: third) try await self.joinNodes(node: first, with: third) - try await self.ensureNodes(.up, nodes: first.cluster.uniqueNode, second.cluster.uniqueNode, third.cluster.uniqueNode) + try await self.ensureNodes(.up, nodes: first.cluster.node, second.cluster.node, third.cluster.node) // crash the second node try second.shutdown() // other nodes have observed it down - try await self.ensureNodes(atLeast: .down, on: first, within: .seconds(15), nodes: second.cluster.uniqueNode) - try await self.ensureNodes(atLeast: .down, on: third, within: .seconds(15), nodes: second.cluster.uniqueNode) + try await self.ensureNodes(atLeast: .down, on: first, within: .seconds(15), nodes: second.cluster.node) + try await self.ensureNodes(atLeast: .down, on: third, within: .seconds(15), nodes: second.cluster.node) // on the leader node, the other node noticed as up: let testKit = self.testKit(first) try testKit.eventually(within: .seconds(20)) { let event: Cluster.Event? = try p1.maybeExpectMessage() switch event { - case .membershipChange(.init(node: second.cluster.uniqueNode, previousStatus: .up, toStatus: .down)): () + case .membershipChange(.init(node: second.cluster.node, previousStatus: .up, toStatus: .down)): () case let other: throw testKit.error("Expected `second` [ up] -> [ .down], on first node, was: \(other, orElse: "nil")") } } try testKit.eventually(within: .seconds(20)) { let event: Cluster.Event? 
= try p1.maybeExpectMessage() switch event { - case .membershipChange(.init(node: second.cluster.uniqueNode, previousStatus: .down, toStatus: .removed)): () + case .membershipChange(.init(node: second.cluster.node, previousStatus: .down, toStatus: .removed)): () case let other: throw testKit.error("Expected `second` [ up] -> [ .down], on first node, was: \(other, orElse: "nil")") } } diff --git a/Tests/DistributedClusterTests/Cluster/ClusterLeaderActionsTests.swift b/Tests/DistributedClusterTests/Cluster/ClusterLeaderActionsTests.swift index 4e943ddb6..d7e1e3996 100644 --- a/Tests/DistributedClusterTests/Cluster/ClusterLeaderActionsTests.swift +++ b/Tests/DistributedClusterTests/Cluster/ClusterLeaderActionsTests.swift @@ -20,11 +20,11 @@ import XCTest // Unit tests of the actions, see `ClusterLeaderActionsClusteredTests` for integration tests final class ClusterLeaderActionsTests: XCTestCase { - let _nodeA = Node(systemName: "nodeA", host: "1.1.1.1", port: 7337) - let _nodeB = Node(systemName: "nodeB", host: "2.2.2.2", port: 8228) - let _nodeC = Node(systemName: "nodeC", host: "3.3.3.3", port: 9119) + let _endpointA = Cluster.Endpoint(systemName: "nodeA", host: "1.1.1.1", port: 7337) + let _endpointB = Cluster.Endpoint(systemName: "nodeB", host: "2.2.2.2", port: 8228) + let _endpointC = Cluster.Endpoint(systemName: "nodeC", host: "3.3.3.3", port: 9119) - var allNodes: [UniqueNode] { + var allNodes: [Cluster.Node] { [self.nodeA, self.nodeB, self.nodeC] } @@ -32,27 +32,27 @@ final class ClusterLeaderActionsTests: XCTestCase { var stateB: ClusterShellState! var stateC: ClusterShellState! - var nodeA: UniqueNode { + var nodeA: Cluster.Node { self.stateA.selfNode } - var nodeB: UniqueNode { + var nodeB: Cluster.Node { self.stateB.selfNode } - var nodeC: UniqueNode { + var nodeC: Cluster.Node { self.stateC.selfNode } override func setUp() { self.stateA = ClusterShellState.makeTestMock(side: .server) { settings in - settings.node = self._nodeA + settings.endpoint = self._endpointA } self.stateB = ClusterShellState.makeTestMock(side: .server) { settings in - settings.node = self._nodeB + settings.endpoint = self._endpointB } self.stateC = ClusterShellState.makeTestMock(side: .server) { settings in - settings.node = self._nodeC + settings.endpoint = self._endpointC } _ = self.stateA.membership.join(self.nodeA) @@ -79,7 +79,7 @@ final class ClusterLeaderActionsTests: XCTestCase { func test_leaderActions_removeDownMembers_ifKnownAsDownToAllMembers() { // make A the leader - let makeFirstTheLeader = Cluster.LeadershipChange(oldLeader: nil, newLeader: self.stateA.membership.member(self.nodeA.node)!)! + let makeFirstTheLeader = Cluster.LeadershipChange(oldLeader: nil, newLeader: self.stateA.membership.member(self.nodeA.endpoint)!)! 
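// The added line above resolves a member by endpoint; other tests in this patch resolve by
// unique node. A compact sketch of both lookups (`membership` and `node` are assumed inputs):
import DistributedCluster

func memberLookupSketch(membership: Cluster.Membership, node: Cluster.Node) {
    let byEndpoint = membership.member(node.endpoint) // any member bound to that address
    let byUniqueNode = membership.uniqueMember(node) // exactly this incarnation, nid included
    _ = (byEndpoint, byUniqueNode)
}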
_ = self.stateA.applyClusterEvent(.leadershipChange(makeFirstTheLeader)) // time to mark B as .down @@ -119,7 +119,7 @@ final class ClusterLeaderActionsTests: XCTestCase { return } member.status.isDown.shouldBeTrue() - member.uniqueNode.shouldEqual(self.nodeB) + member.node.shouldEqual(self.nodeB) // interpret leader actions would interpret it by removing the member now and tombstone-ing it, // see `interpretLeaderActions` diff --git a/Tests/DistributedClusterTests/Cluster/ClusterMembershipSnapshotTests.swift b/Tests/DistributedClusterTests/Cluster/ClusterMembershipSnapshotTests.swift index e5f77ecc1..5efc8518d 100644 --- a/Tests/DistributedClusterTests/Cluster/ClusterMembershipSnapshotTests.swift +++ b/Tests/DistributedClusterTests/Cluster/ClusterMembershipSnapshotTests.swift @@ -23,7 +23,7 @@ final class ClusterMembershipSnapshotTests: ClusteredActorSystemsXCTestCase { let testKit: ActorTestKit = self.testKit(system) try await testKit.eventually(within: .seconds(5)) { await system.cluster.membershipSnapshot.members(atLeast: .joining).shouldContain( - Cluster.Member(node: system.cluster.uniqueNode, status: .joining) + Cluster.Member(node: system.cluster.node, status: .joining) ) } } @@ -44,10 +44,10 @@ final class ClusterMembershipSnapshotTests: ClusteredActorSystemsXCTestCase { throw testKit.error(line: #line - 1) } - let nodes: [UniqueNode] = snapshot.members(atMost: .up).map(\.uniqueNode) - nodes.shouldContain(first.cluster.uniqueNode) - nodes.shouldContain(second.cluster.uniqueNode) - nodes.shouldContain(third.cluster.uniqueNode) + let nodes: [Cluster.Node] = snapshot.members(atMost: .up).map(\.node) + nodes.shouldContain(first.cluster.node) + nodes.shouldContain(second.cluster.node) + nodes.shouldContain(third.cluster.node) } } @@ -71,7 +71,7 @@ final class ClusterMembershipSnapshotTests: ClusteredActorSystemsXCTestCase { // but the snapshot already knows about all of them. 
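// The assertion below only makes sense given an ordering on member statuses; assuming the
// progression seen in these tests (joining, up, down, removed), a snapshot taken later should
// never report an earlier status than a membership value observed before it. A sketch:
import DistributedCluster

func snapshotNotBehindSketch(snapshot: Cluster.Membership, membership: Cluster.Membership) {
    for member in membership.members(atLeast: .joining) {
        if let inSnapshot = snapshot.uniqueMember(member.node) {
            _ = inSnapshot.status // expected to be >= member.status under that ordering
        }
    }
}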
snapshot.count.shouldBeGreaterThanOrEqual(membership.count) membership.members(atLeast: .joining).forEach { mm in - if let nm = snapshot.uniqueMember(mm.uniqueNode) { + if let nm = snapshot.uniqueMember(mm.node) { nm.status.shouldBeGreaterThanOrEqual(mm.status) } } diff --git a/Tests/DistributedClusterTests/Cluster/ClusterOnDownActionTests.swift b/Tests/DistributedClusterTests/Cluster/ClusterOnDownActionTests.swift index a61bb9a39..df0339223 100644 --- a/Tests/DistributedClusterTests/Cluster/ClusterOnDownActionTests.swift +++ b/Tests/DistributedClusterTests/Cluster/ClusterOnDownActionTests.swift @@ -25,7 +25,7 @@ final class ClusterOnDownActionTests: ClusteredActorSystemsXCTestCase { try await self.joinNodes(node: first, with: second) - second.cluster.down(node: first.cluster.uniqueNode.node) + second.cluster.down(endpoint: first.cluster.node.endpoint) try self.capturedLogs(of: first).awaitLogContaining(self.testKit(first), text: "Self node was marked [.down]!") @@ -43,7 +43,7 @@ final class ClusterOnDownActionTests: ClusteredActorSystemsXCTestCase { try await self.joinNodes(node: first, with: second) - second.cluster.down(node: first.cluster.uniqueNode.node) + second.cluster.down(endpoint: first.cluster.node.endpoint) try self.capturedLogs(of: first).awaitLogContaining(self.testKit(first), text: "Self node was marked [.down]!") diff --git a/Tests/DistributedClusterTests/Cluster/DowningStrategy/DowningClusteredTests.swift b/Tests/DistributedClusterTests/Cluster/DowningStrategy/DowningClusteredTests.swift index 017b508c6..a718aeef6 100644 --- a/Tests/DistributedClusterTests/Cluster/DowningStrategy/DowningClusteredTests.swift +++ b/Tests/DistributedClusterTests/Cluster/DowningStrategy/DowningClusteredTests.swift @@ -75,7 +75,7 @@ final class DowningClusteredTests: ClusteredActorSystemsXCTestCase { otherNotDownPairSystem = first } - let expectedDownNode = expectedDownSystem.cluster.uniqueNode + let expectedDownNode = expectedDownSystem.cluster.node // we start cluster event probes early, so they get the events one by one as they happen let eventsProbeOther = await self.testKit(otherNotDownPairSystem).spawnClusterEventStreamTestProbe() @@ -86,35 +86,35 @@ final class DowningClusteredTests: ClusteredActorSystemsXCTestCase { case (.leaveSelfNode, .firstLeader): first.cluster.leave() case (.leaveSelfNode, .secondNonLeader): second.cluster.leave() - case (.downSelf, .firstLeader): first.cluster.down(node: first.cluster.uniqueNode.node) - case (.downSelf, .secondNonLeader): second.cluster.down(node: second.cluster.uniqueNode.node) + case (.downSelf, .firstLeader): first.cluster.down(endpoint: first.cluster.node.endpoint) + case (.downSelf, .secondNonLeader): second.cluster.down(endpoint: second.cluster.node.endpoint) case (.shutdownSelf, .firstLeader): try first.shutdown() case (.shutdownSelf, .secondNonLeader): try second.shutdown() - case (.downFromOtherMember, .firstLeader): second.cluster.down(node: first.cluster.uniqueNode.node) - case (.downFromOtherMember, .secondNonLeader): thirdNeverDownSystem.cluster.down(node: second.cluster.uniqueNode.node) + case (.downFromOtherMember, .firstLeader): second.cluster.down(endpoint: first.cluster.node.endpoint) + case (.downFromOtherMember, .secondNonLeader): thirdNeverDownSystem.cluster.down(endpoint: second.cluster.node.endpoint) } func expectedDownMemberEventsFishing( on: ClusterSystem, file: StaticString = #filePath, line: UInt = #line ) -> (Cluster.Event) -> ActorTestProbe.FishingDirective { - pinfo("Expecting [\(expectedDownSystem)] to become 
[.down] on [\(on.cluster.uniqueNode.node)], method to stop the node [\(stopMethod)]") + pinfo("Expecting [\(expectedDownSystem)] to become [.down] on [\(on.cluster.node.endpoint)], method to stop the node [\(stopMethod)]") return { event in switch event { case .membershipChange(let change) where change.node == expectedDownNode && change.isRemoval: - pinfo("\(on.cluster.uniqueNode.node): \(change)", file: (file), line: line) + pinfo("\(on.cluster.node.endpoint): \(change)", file: (file), line: line) return .catchComplete(change) case .membershipChange(let change) where change.node == expectedDownNode: - pinfo("\(on.cluster.uniqueNode.node): \(change)", file: (file), line: line) + pinfo("\(on.cluster.node.endpoint): \(change)", file: (file), line: line) return .catchContinue(change) - case .reachabilityChange(let change) where change.member.uniqueNode == expectedDownNode: - pnote("\(on.cluster.uniqueNode.node): \(change)", file: (file), line: line) + case .reachabilityChange(let change) where change.member.node == expectedDownNode: + pnote("\(on.cluster.node.endpoint): \(change)", file: (file), line: line) return .ignore default: - pnote("\(on.cluster.uniqueNode.node): \(event)", file: (file), line: line) + pnote("\(on.cluster.node.endpoint): \(event)", file: (file), line: line) return .ignore } } @@ -232,16 +232,16 @@ final class DowningClusteredTests: ClusteredActorSystemsXCTestCase { } let first = nodes.first! - var probes: [UniqueNode: ActorTestProbe] = [:] + var probes: [Cluster.Node: ActorTestProbe] = [:] for remainingNode in nodes { - probes[remainingNode.cluster.uniqueNode] = await self.testKit(remainingNode).spawnClusterEventStreamTestProbe() + probes[remainingNode.cluster.node] = await self.testKit(remainingNode).spawnClusterEventStreamTestProbe() } pinfo("Joining \(nodes.count) nodes...") let joiningStart = ContinuousClock.Instant.now - nodes.forEach { first.cluster.join(node: $0.cluster.uniqueNode.node) } - try await self.ensureNodes(.up, within: .seconds(30), nodes: nodes.map(\.cluster.uniqueNode)) + nodes.forEach { first.cluster.join(endpoint: $0.cluster.node.endpoint) } + try await self.ensureNodes(.up, within: .seconds(30), nodes: nodes.map(\.cluster.node)) let joiningStop = ContinuousClock.Instant.now pinfo("Joined \(nodes.count) nodes, took: \((joiningStop - joiningStart).prettyDescription)") @@ -250,7 +250,7 @@ final class DowningClusteredTests: ClusteredActorSystemsXCTestCase { var remainingNodes = nodes remainingNodes.removeFirst(nodesToDown.count) - pinfo("Downing \(nodesToDown.count) nodes: \(nodesToDown.map(\.cluster.uniqueNode))") + pinfo("Downing \(nodesToDown.count) nodes: \(nodesToDown.map(\.cluster.node))") for node in nodesToDown { try! 
await node.shutdown().wait() } @@ -259,13 +259,13 @@ final class DowningClusteredTests: ClusteredActorSystemsXCTestCase { on: ClusterSystem, file: StaticString = #filePath, line: UInt = #line ) -> (Cluster.Event) -> ActorTestProbe.FishingDirective { - pinfo("Expecting \(nodesToDown.map(\.cluster.uniqueNode.node)) to become [.down] on [\(on.cluster.uniqueNode.node)]") + pinfo("Expecting \(nodesToDown.map(\.cluster.node.endpoint)) to become [.down] on [\(on.cluster.node.endpoint)]") var removalsFound = 0 return { event in switch event { case .membershipChange(let change) where change.isRemoval: - pinfo("\(on.cluster.uniqueNode.node): \(change)", file: file, line: line) + pinfo("\(on.cluster.node.endpoint): \(change)", file: file, line: line) removalsFound += 1 if removalsFound == nodesToDown.count { return .catchComplete(change) @@ -273,7 +273,7 @@ final class DowningClusteredTests: ClusteredActorSystemsXCTestCase { return .catchContinue(change) } case .membershipChange(let change) where change.isDown: - pinfo("\(on.cluster.uniqueNode.node): \(change)", file: file, line: line) + pinfo("\(on.cluster.node.endpoint): \(change)", file: file, line: line) return .catchContinue(change) default: return .ignore @@ -282,12 +282,12 @@ final class DowningClusteredTests: ClusteredActorSystemsXCTestCase { } for remainingNode in remainingNodes { - let probe = probes[remainingNode.cluster.uniqueNode]! + let probe = probes[remainingNode.cluster.node]! let events = try probe.fishFor(Cluster.MembershipChange.self, within: .seconds(60), expectedDownMemberEventsFishing(on: remainingNode)) events.shouldContain(where: { change in change.status.isDown && (change.previousStatus == .joining || change.previousStatus == .up) }) for expectedDownNode in nodesToDown { - events.shouldContain(Cluster.MembershipChange(node: expectedDownNode.cluster.uniqueNode, previousStatus: .down, toStatus: .removed)) + events.shouldContain(Cluster.MembershipChange(node: expectedDownNode.cluster.node, previousStatus: .down, toStatus: .removed)) } } } diff --git a/Tests/DistributedClusterTests/Cluster/DowningStrategy/TimeoutBasedDowningInstanceTests.swift b/Tests/DistributedClusterTests/Cluster/DowningStrategy/TimeoutBasedDowningInstanceTests.swift index b46bb16d7..1b54227f7 100644 --- a/Tests/DistributedClusterTests/Cluster/DowningStrategy/TimeoutBasedDowningInstanceTests.swift +++ b/Tests/DistributedClusterTests/Cluster/DowningStrategy/TimeoutBasedDowningInstanceTests.swift @@ -19,16 +19,16 @@ import XCTest final class TimeoutBasedDowningInstanceTests: XCTestCase { var instance: TimeoutBasedDowningStrategy! 
- let selfNode = UniqueNode(node: Node(systemName: "Test", host: "localhost", port: 8888), nid: .random()) + let selfNode = Cluster.Node(endpoint: Cluster.Endpoint(systemName: "Test", host: "localhost", port: 8888), nid: .random()) lazy var selfMember = Cluster.Member(node: self.selfNode, status: .up) - let otherNode = UniqueNode(node: Node(systemName: "Test", host: "localhost", port: 9999), nid: .random()) + let otherNode = Cluster.Node(endpoint: Cluster.Endpoint(systemName: "Test", host: "localhost", port: 9999), nid: .random()) lazy var otherMember = Cluster.Member(node: self.otherNode, status: .up) - let yetAnotherNode = UniqueNode(node: Node(systemName: "Test", host: "localhost", port: 2222), nid: .random()) + let yetAnotherNode = Cluster.Node(endpoint: Cluster.Endpoint(systemName: "Test", host: "localhost", port: 2222), nid: .random()) lazy var yetAnotherMember = Cluster.Member(node: self.yetAnotherNode, status: .up) - let nonMemberNode = UniqueNode(node: Node(systemName: "Test", host: "localhost", port: 1111), nid: .random()) + let nonMemberNode = Cluster.Node(endpoint: Cluster.Endpoint(systemName: "Test", host: "localhost", port: 1111), nid: .random()) lazy var nonMember = Cluster.Member(node: self.nonMemberNode, status: .up) override func setUp() { diff --git a/Tests/DistributedClusterTests/Cluster/GossipSeenTableTests.swift b/Tests/DistributedClusterTests/Cluster/GossipSeenTableTests.swift index 6fa0c37fc..73f9da09f 100644 --- a/Tests/DistributedClusterTests/Cluster/GossipSeenTableTests.swift +++ b/Tests/DistributedClusterTests/Cluster/GossipSeenTableTests.swift @@ -21,9 +21,9 @@ import XCTest final class GossipSeenTableTests: XCTestCase { typealias SeenTable = Cluster.MembershipGossip.SeenTable - var nodeA: UniqueNode! - var nodeB: UniqueNode! - var nodeC: UniqueNode! + var nodeA: Cluster.Node! + var nodeB: Cluster.Node! + var nodeC: Cluster.Node! 
lazy var allNodes = [ self.nodeA!, self.nodeB!, self.nodeC!, @@ -31,9 +31,9 @@ final class GossipSeenTableTests: XCTestCase { override func setUp() { super.setUp() - self.nodeA = UniqueNode(protocol: "sact", systemName: "firstA", host: "127.0.0.1", port: 7111, nid: .random()) - self.nodeB = UniqueNode(protocol: "sact", systemName: "secondB", host: "127.0.0.1", port: 7222, nid: .random()) - self.nodeC = UniqueNode(protocol: "sact", systemName: "thirdC", host: "127.0.0.1", port: 7333, nid: .random()) + self.nodeA = Cluster.Node(systemName: "firstA", host: "127.0.0.1", port: 7111, nid: .random()) + self.nodeB = Cluster.Node(systemName: "secondB", host: "127.0.0.1", port: 7222, nid: .random()) + self.nodeC = Cluster.Node(systemName: "thirdC", host: "127.0.0.1", port: 7333, nid: .random()) } func test_seenTable_compare_concurrent_eachOtherDontKnown() { diff --git a/Tests/DistributedClusterTests/Cluster/LeadershipTests.swift b/Tests/DistributedClusterTests/Cluster/LeadershipTests.swift index 30b82939f..9c5b76682 100644 --- a/Tests/DistributedClusterTests/Cluster/LeadershipTests.swift +++ b/Tests/DistributedClusterTests/Cluster/LeadershipTests.swift @@ -19,10 +19,10 @@ import NIO import XCTest final class LeadershipTests: XCTestCase { - let memberA = Cluster.Member(node: UniqueNode(node: Node(systemName: "System", host: "1.1.1.1", port: 7337), nid: .random()), status: .up) - let memberB = Cluster.Member(node: UniqueNode(node: Node(systemName: "System", host: "2.2.2.2", port: 8228), nid: .random()), status: .up) - let memberC = Cluster.Member(node: UniqueNode(node: Node(systemName: "System", host: "3.3.3.3", port: 9119), nid: .random()), status: .up) - let newMember = Cluster.Member(node: UniqueNode(node: Node(systemName: "System", host: "4.4.4.4", port: 1001), nid: .random()), status: .up) + let memberA = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "System", host: "1.1.1.1", port: 7337), nid: .random()), status: .up) + let memberB = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "System", host: "2.2.2.2", port: 8228), nid: .random()), status: .up) + let memberC = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "System", host: "3.3.3.3", port: 9119), nid: .random()), status: .up) + let newMember = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "System", host: "4.4.4.4", port: 1001), nid: .random()), status: .up) let fakeContext = LeaderElectionContext(log: NoopLogger.make(), eventLoop: EmbeddedEventLoop()) @@ -46,13 +46,13 @@ final class LeadershipTests: XCTestCase { var election = Leadership.LowestReachableMember(minimumNrOfMembers: 3) var membership = self.initialMembership - _ = membership.removeCompletely(self.memberA.uniqueNode) + _ = membership.removeCompletely(self.memberA.node) // 2 members -> not enough to make decision anymore let change1: Cluster.LeadershipChange? = try election.runElection(context: self.fakeContext, membership: membership).future.wait() change1.shouldBeNil() - _ = membership.join(self.newMember.uniqueNode) + _ = membership.join(self.newMember.node) // 3 members again, should work let change2: Cluster.LeadershipChange? 
= try election.runElection(context: self.fakeContext, membership: membership).future.wait() @@ -63,13 +63,13 @@ final class LeadershipTests: XCTestCase { var election = Leadership.LowestReachableMember(minimumNrOfMembers: 3) var membership = self.initialMembership - _ = membership.mark(self.memberB.uniqueNode, reachability: .unreachable) + _ = membership.mark(self.memberB.node, reachability: .unreachable) // 2 reachable members -> not enough to make decision anymore let change1: Cluster.LeadershipChange? = try election.runElection(context: self.fakeContext, membership: membership).future.wait() change1.shouldBeNil() - _ = membership.join(self.newMember.uniqueNode) + _ = membership.join(self.newMember.node) // 3 reachable members again, 1 unreachable, should work let change2: Cluster.LeadershipChange? = try election.runElection(context: self.fakeContext, membership: membership).future.wait() @@ -80,8 +80,8 @@ final class LeadershipTests: XCTestCase { var election = Leadership.LowestReachableMember(minimumNrOfMembers: 3) var membership = self.initialMembership - _ = membership.mark(self.memberA.uniqueNode, reachability: .unreachable) - _ = membership.mark(self.memberB.uniqueNode, reachability: .unreachable) + _ = membership.mark(self.memberA.node, reachability: .unreachable) + _ = membership.mark(self.memberB.node, reachability: .unreachable) // 1 reachable member -> not enough to make decision anymore let change1: Cluster.LeadershipChange? = try election.runElection(context: self.fakeContext, membership: membership).future.wait() @@ -98,7 +98,7 @@ final class LeadershipTests: XCTestCase { leader.shouldEqual(self.memberA) // leader is down: - _ = membership.mark(self.memberA.uniqueNode, as: .down) + _ = membership.mark(self.memberA.node, as: .down) // 2 members -> not enough to make decision anymore // Since we go from a leader to without, there should be a change @@ -111,12 +111,12 @@ final class LeadershipTests: XCTestCase { var election = Leadership.LowestReachableMember(minimumNrOfMembers: 3) var membership = self.initialMembership - _ = membership.join(self.newMember.uniqueNode) + _ = membership.join(self.newMember.node) (try election.runElection(context: self.fakeContext, membership: membership).future.wait()) .shouldEqual(Cluster.LeadershipChange(oldLeader: nil, newLeader: self.memberA)) - _ = membership.mark(self.memberA.uniqueNode, as: .down) + _ = membership.mark(self.memberA.node, as: .down) (try election.runElection(context: self.fakeContext, membership: membership).future.wait()) .shouldEqual(Cluster.LeadershipChange(oldLeader: nil, newLeader: self.memberB)) } @@ -125,12 +125,12 @@ final class LeadershipTests: XCTestCase { var election = Leadership.LowestReachableMember(minimumNrOfMembers: 3) var membership = self.initialMembership - _ = membership.join(self.newMember.uniqueNode) + _ = membership.join(self.newMember.node) (try election.runElection(context: self.fakeContext, membership: membership).future.wait()) .shouldEqual(Cluster.LeadershipChange(oldLeader: nil, newLeader: self.memberA)) - _ = membership.mark(self.memberA.uniqueNode, as: .down) + _ = membership.mark(self.memberA.node, as: .down) (try election.runElection(context: self.fakeContext, membership: membership).future.wait()) .shouldEqual(Cluster.LeadershipChange(oldLeader: nil, newLeader: self.memberB)) } @@ -150,7 +150,7 @@ final class LeadershipTests: XCTestCase { .map(applyToMembership) .shouldEqual(Cluster.LeadershipChange(oldLeader: nil, newLeader: self.memberA)) - _ = membership.mark(self.memberA.uniqueNode, 
reachability: .unreachable) + _ = membership.mark(self.memberA.node, reachability: .unreachable) try election.runElection(context: self.fakeContext, membership: membership).future.wait() .map(applyToMembership) .shouldEqual(nil) @@ -179,14 +179,14 @@ final class LeadershipTests: XCTestCase { .shouldEqual(Cluster.LeadershipChange(oldLeader: nil, newLeader: self.memberA)) // down third - _ = membership.mark(self.memberC.uniqueNode, as: .down) + _ = membership.mark(self.memberC.node, as: .down) // no reason to remove the leadership from the first node try election.runElection(context: self.fakeContext, membership: membership).future.wait() .map(applyToMembership) .shouldEqual(nil) // down second - _ = membership.mark(self.memberB.uniqueNode, as: .down) + _ = membership.mark(self.memberB.node, as: .down) // STILL no reason to remove the leadership from the first node try election.runElection(context: self.fakeContext, membership: membership).future.wait() .map(applyToMembership) @@ -215,14 +215,14 @@ final class LeadershipTests: XCTestCase { .shouldEqual(Cluster.LeadershipChange(oldLeader: nil, newLeader: self.memberA)) // down third - _ = membership.mark(self.memberC.uniqueNode, as: .down) + _ = membership.mark(self.memberC.node, as: .down) // no reason to remove the leadership from the first node try election.runElection(context: self.fakeContext, membership: membership).future.wait() .map(applyToMembership) .shouldEqual(Cluster.LeadershipChange(oldLeader: self.memberA, newLeader: nil)) // down second - _ = membership.mark(self.memberB.uniqueNode, as: .down) + _ = membership.mark(self.memberB.node, as: .down) // STILL no reason to remove the leadership from the first node try election.runElection(context: self.fakeContext, membership: membership).future.wait() .map(applyToMembership) diff --git a/Tests/DistributedClusterTests/Cluster/MembershipGossipClusteredTests.swift b/Tests/DistributedClusterTests/Cluster/MembershipGossipClusteredTests.swift index 9147d1eea..da0a443e6 100644 --- a/Tests/DistributedClusterTests/Cluster/MembershipGossipClusteredTests.swift +++ b/Tests/DistributedClusterTests/Cluster/MembershipGossipClusteredTests.swift @@ -52,26 +52,26 @@ final class MembershipGossipClusteredTests: ClusteredActorSystemsXCTestCase { settings.onDownAction = .none } - first.cluster.join(node: second.cluster.uniqueNode.node) - third.cluster.join(node: second.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) + third.cluster.join(endpoint: second.cluster.node.endpoint) - try assertAssociated(first, withAtLeast: second.cluster.uniqueNode) - try assertAssociated(second, withAtLeast: third.cluster.uniqueNode) - try assertAssociated(first, withAtLeast: third.cluster.uniqueNode) + try assertAssociated(first, withAtLeast: second.cluster.node) + try assertAssociated(second, withAtLeast: third.cluster.node) + try assertAssociated(first, withAtLeast: third.cluster.node) - try await self.assertMemberStatus(on: second, node: first.cluster.uniqueNode, is: .up, within: .seconds(10)) - try await self.assertMemberStatus(on: second, node: second.cluster.uniqueNode, is: .up, within: .seconds(10)) - try await self.assertMemberStatus(on: second, node: third.cluster.uniqueNode, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: second, node: first.cluster.node, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: second, node: second.cluster.node, is: .up, within: .seconds(10)) + try await self.assertMemberStatus(on: second, node: 
third.cluster.node, is: .up, within: .seconds(10)) let firstEvents = await testKit(first).spawnClusterEventStreamTestProbe() let secondEvents = await testKit(second).spawnClusterEventStreamTestProbe() let thirdEvents = await testKit(third).spawnClusterEventStreamTestProbe() - second.cluster.down(node: third.cluster.uniqueNode.node) + second.cluster.down(endpoint: third.cluster.node.endpoint) - try self.assertMemberDown(firstEvents, node: third.cluster.uniqueNode) - try self.assertMemberDown(secondEvents, node: third.cluster.uniqueNode) - try self.assertMemberDown(thirdEvents, node: third.cluster.uniqueNode) + try self.assertMemberDown(firstEvents, node: third.cluster.node) + try self.assertMemberDown(secondEvents, node: third.cluster.node) + try self.assertMemberDown(thirdEvents, node: third.cluster.node) } // ==== ------------------------------------------------------------------------------------------------------------ @@ -79,38 +79,38 @@ final class MembershipGossipClusteredTests: ClusteredActorSystemsXCTestCase { func test_join_swimDiscovered_thirdNode() async throws { let first = await setUpNode("first") { settings in - settings.node.port = 7111 + settings.endpoint.port = 7111 } let second = await setUpNode("second") { settings in - settings.node.port = 8222 + settings.endpoint.port = 8222 } let third = await setUpNode("third") { settings in - settings.node.port = 9333 + settings.endpoint.port = 9333 } // 1. first join second - first.cluster.join(node: second.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) // 2. third join second - third.cluster.join(node: second.cluster.uniqueNode.node) + third.cluster.join(endpoint: second.cluster.node.endpoint) // confirm 1 - try assertAssociated(first, withAtLeast: second.cluster.uniqueNode) - try assertAssociated(second, withAtLeast: first.cluster.uniqueNode) + try assertAssociated(first, withAtLeast: second.cluster.node) + try assertAssociated(second, withAtLeast: first.cluster.node) pinfo("Associated: first <~> second") // confirm 2 - try assertAssociated(third, withAtLeast: second.cluster.uniqueNode) - try assertAssociated(second, withAtLeast: third.cluster.uniqueNode) + try assertAssociated(third, withAtLeast: second.cluster.node) + try assertAssociated(second, withAtLeast: third.cluster.node) pinfo("Associated: second <~> third") // 3.1. first should discover third // confirm 3.1 - try assertAssociated(first, withAtLeast: third.cluster.uniqueNode) + try assertAssociated(first, withAtLeast: third.cluster.node) pinfo("Associated: first ~> third") // 3.2. third should discover first // confirm 3.2 - try assertAssociated(third, withAtLeast: first.cluster.uniqueNode) + try assertAssociated(third, withAtLeast: first.cluster.node) pinfo("Associated: third ~> first") // excellent, all nodes know each other diff --git a/Tests/DistributedClusterTests/Cluster/MembershipGossipLogicSimulationTests.swift b/Tests/DistributedClusterTests/Cluster/MembershipGossipLogicSimulationTests.swift index b950b578f..059843691 100644 --- a/Tests/DistributedClusterTests/Cluster/MembershipGossipLogicSimulationTests.swift +++ b/Tests/DistributedClusterTests/Cluster/MembershipGossipLogicSimulationTests.swift @@ -37,8 +37,8 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas self.systems.first(where: { $0.name == id })! 
} - var nodes: [UniqueNode] { - self._nodes.map(\.cluster.uniqueNode) + var nodes: [Cluster.Node] { + self._nodes.map(\.cluster.node) } var mockPeers: [_AddressableActorRef] = [] @@ -50,7 +50,7 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas var logics: [MembershipGossipLogic] = [] func logic(_ id: String) -> MembershipGossipLogic { - guard let logic = (self.logics.first { $0.localNode.node.systemName == id }) else { + guard let logic = (self.logics.first { $0.localNode.endpoint.systemName == id }) else { fatalError("No such logic for id: \(id)") } @@ -93,9 +93,9 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas runs: 10, setUpPeers: { () in [ - Cluster.MembershipGossip.parse(initialGossipState, owner: systemA.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialGossipState, owner: systemB.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialGossipState, owner: systemC.cluster.uniqueNode, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialGossipState, owner: systemA.cluster.node, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialGossipState, owner: systemB.cluster.node, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialGossipState, owner: systemC.cluster.node, nodes: self.nodes), ] }, updateLogic: { _ in @@ -109,7 +109,7 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas B: A@3 B@3 C@3 C: A@3 B@3 C@3 """, - owner: systemA.cluster.uniqueNode, nodes: nodes + owner: systemA.cluster.node, nodes: nodes )) }, stopRunWhen: { (logics, _) in @@ -171,17 +171,17 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas runs: 1, setUpPeers: { () in [ - Cluster.MembershipGossip.parse(initialFewGossip, owner: systemA.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialFewGossip, owner: systemB.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialFewGossip, owner: systemC.cluster.uniqueNode, nodes: self.nodes), - - Cluster.MembershipGossip.parse(initialNewGossip, owner: systemD.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialNewGossip, owner: systemE.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialNewGossip, owner: systemF.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialNewGossip, owner: systemG.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialNewGossip, owner: systemH.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialNewGossip, owner: systemI.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialNewGossip, owner: systemJ.cluster.uniqueNode, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialFewGossip, owner: systemA.cluster.node, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialFewGossip, owner: systemB.cluster.node, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialFewGossip, owner: systemC.cluster.node, nodes: self.nodes), + + Cluster.MembershipGossip.parse(initialNewGossip, owner: systemD.cluster.node, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialNewGossip, owner: systemE.cluster.node, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialNewGossip, owner: systemF.cluster.node, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialNewGossip, owner: systemG.cluster.node, nodes: self.nodes), + 
Cluster.MembershipGossip.parse(initialNewGossip, owner: systemH.cluster.node, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialNewGossip, owner: systemI.cluster.node, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialNewGossip, owner: systemJ.cluster.node, nodes: self.nodes), ] }, updateLogic: { _ in @@ -202,7 +202,7 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas I: A@20 B@16 C@16 D@16 E@16 F@16 G@16 H@16 I@16 J@16 J: A@20 B@16 C@16 D@16 E@16 F@16 G@16 H@16 I@16 J@16 """, - owner: systemA.cluster.uniqueNode, nodes: nodes + owner: systemA.cluster.node, nodes: nodes )) // they're trying to join @@ -220,7 +220,7 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas I: A@12 B@11 C@11 D@9 E@13 F@13 G@13 H@13 I@13 J@13 J: A@12 B@11 C@11 D@9 E@13 F@13 G@13 H@13 I@13 J@13 """, - owner: systemD.cluster.uniqueNode, nodes: nodes + owner: systemD.cluster.node, nodes: nodes )) }, stopRunWhen: { (logics, _) in @@ -254,9 +254,9 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas runs: 10, setUpPeers: { () in [ - Cluster.MembershipGossip.parse(initialGossipState, owner: systemA.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialGossipState, owner: systemB.cluster.uniqueNode, nodes: self.nodes), - Cluster.MembershipGossip.parse(initialGossipState, owner: systemC.cluster.uniqueNode, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialGossipState, owner: systemA.cluster.node, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialGossipState, owner: systemB.cluster.node, nodes: self.nodes), + Cluster.MembershipGossip.parse(initialGossipState, owner: systemC.cluster.node, nodes: self.nodes), ] }, updateLogic: { _ in @@ -270,7 +270,7 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas B: A@3 B@3 C@3 C: A@3 B@3 C@3 """, - owner: systemA.cluster.uniqueNode, nodes: nodes + owner: systemA.cluster.node, nodes: nodes )) }, stopRunWhen: { logics, _ in @@ -317,7 +317,7 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas for _ in 1 ... runs { // initialize with user provided gossips self.logics = initialGossips.map { initialGossip in - let system = self.system(initialGossip.owner.node.systemName) + let system = self.system(initialGossip.owner.systemName) let probe = self.testKit(system).makeTestProbe(expecting: Cluster.MembershipGossip.self) let logic = self.makeLogic(system, probe) logic.receiveLocalGossipUpdate(initialGossip) @@ -326,11 +326,11 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas func allConverged(gossips: [Cluster.MembershipGossip]) -> Bool { var allSatisfied = true // on purpose not via .allSatisfy() since we want to print status of each logic - for g in gossips.sorted(by: { $0.owner.node.systemName < $1.owner.node.systemName }) { + for g in gossips.sorted(by: { $0.owner.systemName < $1.owner.systemName }) { let converged = g.converged() let convergenceStatus = converged ? 
"(locally assumed) converged" : "not converged" - log.notice("\(g.owner.node.systemName): \(convergenceStatus)", metadata: [ + log.notice("\(g.owner.endpoint.systemName): \(convergenceStatus)", metadata: [ "gossip": Logger.MetadataValue.pretty(g), ]) @@ -348,25 +348,25 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas let participatingGossips = self.logics.shuffled() for logic in participatingGossips { let selectedPeers: [_AddressableActorRef] = logic.selectPeers(self.peers(of: logic)) - log.notice("[\(logic.nodeName)] selected peers: \(selectedPeers.map(\.id.uniqueNode.node.systemName))") + log.notice("[\(logic.nodeName)] selected peers: \(selectedPeers.map(\.id.node.endpoint.systemName))") for targetPeer in selectedPeers { messageCounts[messageCounts.endIndex - 1] += 1 let targetGossip = logic.makePayload(target: targetPeer) if let gossip = targetGossip { - log.notice(" \(logic.nodeName) -> \(targetPeer.id.uniqueNode.node.systemName)", metadata: [ + log.notice(" \(logic.nodeName) -> \(targetPeer.id.node.endpoint.systemName)", metadata: [ "gossip": Logger.MetadataValue.pretty(gossip), ]) let targetLogic = self.selectLogic(targetPeer) let maybeAck = targetLogic.receiveGossip(gossip, from: self.peer(logic)) - log.notice("updated [\(targetPeer.id.uniqueNode.node.systemName)]", metadata: [ + log.notice("updated [\(targetPeer.id.node.endpoint.systemName)]", metadata: [ "gossip": Logger.MetadataValue.pretty(targetLogic.latestGossip), ]) if let ack = maybeAck { - log.notice(" \(logic.nodeName) <- \(targetPeer.id.uniqueNode.node.systemName) (ack)", metadata: [ + log.notice(" \(logic.nodeName) <- \(targetPeer.id.node.endpoint.systemName) (ack)", metadata: [ "ack": Logger.MetadataValue.pretty(ack), ]) logic.receiveAcknowledgement(ack, from: self.peer(targetLogic), confirming: gossip) @@ -419,16 +419,16 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas // MARK: Support functions func peers(of logic: MembershipGossipLogic) -> [_AddressableActorRef] { - Array(self.mockPeers.filter { $0.id.uniqueNode != logic.localNode }) + Array(self.mockPeers.filter { $0.id.node != logic.localNode }) } func selectLogic(_ peer: _AddressableActorRef) -> MembershipGossipLogic { - (self.logics.first { $0.localNode == peer.id.uniqueNode })! + (self.logics.first { $0.localNode == peer.id.node })! } func peer(_ logic: MembershipGossipLogic) -> _AddressableActorRef { - let nodeName = logic.localNode.node.systemName - if let peer = (self.mockPeers.first { $0.id.uniqueNode.node.systemName == nodeName }) { + let nodeName = logic.localNode.endpoint.systemName + if let peer = (self.mockPeers.first { $0.id.node.endpoint.systemName == nodeName }) { return peer } else { fatalError("No addressable peer for logic: \(logic), peers: \(self.mockPeers)") @@ -438,6 +438,6 @@ final class MembershipGossipLogicSimulationTests: ClusteredActorSystemsXCTestCas extension MembershipGossipLogic { fileprivate var nodeName: String { - self.localNode.node.systemName + self.localNode.endpoint.systemName } } diff --git a/Tests/DistributedClusterTests/Cluster/MembershipGossipTests.swift b/Tests/DistributedClusterTests/Cluster/MembershipGossipTests.swift index 0532f7f38..d94c1e038 100644 --- a/Tests/DistributedClusterTests/Cluster/MembershipGossipTests.swift +++ b/Tests/DistributedClusterTests/Cluster/MembershipGossipTests.swift @@ -19,20 +19,20 @@ import XCTest /// Tests of just the datatype final class MembershipGossipTests: XCTestCase { - var nodeA: UniqueNode! - var nodeB: UniqueNode! 
- var nodeC: UniqueNode! - var fourthNode: UniqueNode! + var nodeA: Cluster.Node! + var nodeB: Cluster.Node! + var nodeC: Cluster.Node! + var fourthNode: Cluster.Node! lazy var allNodes = [ self.nodeA!, self.nodeB!, self.nodeC!, self.fourthNode!, ] override func setUp() { super.setUp() - self.nodeA = UniqueNode(protocol: "sact", systemName: "firstA", host: "127.0.0.1", port: 7111, nid: .random()) - self.nodeB = UniqueNode(protocol: "sact", systemName: "secondB", host: "127.0.0.1", port: 7222, nid: .random()) - self.nodeC = UniqueNode(protocol: "sact", systemName: "thirdC", host: "127.0.0.1", port: 7333, nid: .random()) - self.fourthNode = UniqueNode(protocol: "sact", systemName: "fourthD", host: "127.0.0.1", port: 7444, nid: .random()) + self.nodeA = Cluster.Node(systemName: "firstA", host: "127.0.0.1", port: 7111, nid: .random()) + self.nodeB = Cluster.Node(systemName: "secondB", host: "127.0.0.1", port: 7222, nid: .random()) + self.nodeC = Cluster.Node(systemName: "thirdC", host: "127.0.0.1", port: 7333, nid: .random()) + self.fourthNode = Cluster.Node(systemName: "fourthD", host: "127.0.0.1", port: 7444, nid: .random()) } // ==== ------------------------------------------------------------------------------------------------------------ @@ -185,7 +185,7 @@ final class MembershipGossipTests: XCTestCase { C: A@5 B@5 C@6 """, owner: self.nodeB, nodes: self.allNodes - ) // TODO: this will be rejected since owner is the downe node (!) add another test with third sending the same info + ) // TODO: this will be rejected since owner is the downed node (!) add another test with third sending the same info let gossipBeforeMerge = gossip let directive = gossip.mergeForward(incoming: incomingOldGossip) @@ -196,7 +196,7 @@ final class MembershipGossipTests: XCTestCase { ) gossip.membership.members(atLeast: .joining).shouldNotContain(removedMember) - gossip.seen.nodes.shouldNotContain(removedMember.uniqueNode) + gossip.seen.nodes.shouldNotContain(removedMember.node) gossip.seen.shouldEqual(gossipBeforeMerge.seen) gossip.membership.shouldEqual(gossipBeforeMerge.membership) @@ -518,12 +518,12 @@ final class MembershipGossipTests: XCTestCase { } func test_gossip_eventuallyConverges() { - func makeRandomGossip(owner node: UniqueNode) -> Cluster.MembershipGossip { + func makeRandomGossip(owner node: Cluster.Node) -> Cluster.MembershipGossip { var gossip = Cluster.MembershipGossip(ownerNode: node) _ = gossip.membership.join(node) _ = gossip.membership.mark(node, as: .joining) var vv = VersionVector() - vv.state[.uniqueNode(node)] = VersionVector.Version(node.port) + vv.state[.node(node)] = VersionVector.Version(node.port) gossip.seen.underlying[node] = .init(vv) // know just enough that we're not alone and thus need to communicate: diff --git a/Tests/DistributedClusterTests/Cluster/Protobuf/ClusterEvents+SerializationTests.swift b/Tests/DistributedClusterTests/Cluster/Protobuf/ClusterEvents+SerializationTests.swift index 41110d34f..d41bfa82a 100644 --- a/Tests/DistributedClusterTests/Cluster/Protobuf/ClusterEvents+SerializationTests.swift +++ b/Tests/DistributedClusterTests/Cluster/Protobuf/ClusterEvents+SerializationTests.swift @@ -26,7 +26,7 @@ final class ClusterEventsSerializationTests: SingleClusterSystemXCTestCase { ) func test_serializationOf_membershipChange() throws { - let change = Cluster.MembershipChange(node: UniqueNode(node: Node(systemName: "first", host: "1.1.1.1", port: 7337), nid: .random()), previousStatus: .leaving, toStatus: .removed) + let change = Cluster.MembershipChange(node: 
Cluster.Node(endpoint: Cluster.Endpoint(systemName: "first", host: "1.1.1.1", port: 7337), nid: .random()), previousStatus: .leaving, toStatus: .removed) let event = Cluster.Event.membershipChange(change) let proto = try event.toProto(context: self.context) @@ -36,8 +36,8 @@ final class ClusterEventsSerializationTests: SingleClusterSystemXCTestCase { } func test_serializationOf_leadershipChange() throws { - let old = Cluster.Member(node: UniqueNode(node: Node(systemName: "first", host: "1.1.1.1", port: 7337), nid: .random()), status: .joining) - let new = Cluster.Member(node: UniqueNode(node: Node(systemName: "first", host: "1.2.2.1", port: 2222), nid: .random()), status: .up) + let old = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "first", host: "1.1.1.1", port: 7337), nid: .random()), status: .joining) + let new = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "first", host: "1.2.2.1", port: 2222), nid: .random()), status: .up) let event = Cluster.Event.leadershipChange(Cluster.LeadershipChange(oldLeader: old, newLeader: new)!) // !-safe, since new/old leader known to be different let proto = try event.toProto(context: self.context) diff --git a/Tests/DistributedClusterTests/Cluster/Protobuf/Membership+SerializationTests.swift b/Tests/DistributedClusterTests/Cluster/Protobuf/Membership+SerializationTests.swift index b3ff2ee16..310b891d7 100644 --- a/Tests/DistributedClusterTests/Cluster/Protobuf/Membership+SerializationTests.swift +++ b/Tests/DistributedClusterTests/Cluster/Protobuf/Membership+SerializationTests.swift @@ -33,8 +33,8 @@ final class MembershipSerializationTests: SingleClusterSystemXCTestCase { func test_serializationOf_membership() throws { let membership: Cluster.Membership = [ - Cluster.Member(node: UniqueNode(node: Node(systemName: "first", host: "1.1.1.1", port: 7337), nid: .random()), status: .up), - Cluster.Member(node: UniqueNode(node: Node(systemName: "second", host: "2.2.2.2", port: 8228), nid: .random()), status: .down), + Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "first", host: "1.1.1.1", port: 7337), nid: .random()), status: .up), + Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "second", host: "2.2.2.2", port: 8228), nid: .random()), status: .down), ] let proto = try membership.toProto(context: self.context) @@ -49,14 +49,14 @@ final class MembershipSerializationTests: SingleClusterSystemXCTestCase { func test_gossip_serialization() throws { let members = (1 ... 15).map { id in Cluster.Member( - node: UniqueNode( - node: Node(systemName: "\(id)", host: "1.1.1.\(id)", port: 1111), + node: Cluster.Node( + endpoint: Cluster.Endpoint(systemName: "\(id)", host: "1.1.1.\(id)", port: 1111), nid: .init(UInt64("\(id)\(id)\(id)\(id)")!) 
// pretend a real-ish looking ID, but be easier to read ), status: .up ) } - let nodes = members.map(\.uniqueNode) + let nodes = members.map(\.node) let gossip = Cluster.MembershipGossip.parse( """ diff --git a/Tests/DistributedClusterTests/Cluster/ProtobufRoundTripTests.swift b/Tests/DistributedClusterTests/Cluster/ProtobufRoundTripTests.swift index 278780086..82dc8ffdd 100644 --- a/Tests/DistributedClusterTests/Cluster/ProtobufRoundTripTests.swift +++ b/Tests/DistributedClusterTests/Cluster/ProtobufRoundTripTests.swift @@ -28,13 +28,13 @@ final class ProtobufRoundTripTests: SingleClusterSystemXCTestCase { } let allocator = ByteBufferAllocator() - var node: UniqueNode { - self.system.cluster.uniqueNode + var node: Cluster.Node { + self.system.cluster.node } var localActorAddress: ActorID { try! ActorPath._user.appending("hello") - .makeLocalID(on: self.system.cluster.uniqueNode, incarnation: .wellKnown) + .makeLocalID(on: self.system.cluster.node, incarnation: .wellKnown) } // ==== ------------------------------------------------------------------------------------------------------------ @@ -48,7 +48,7 @@ final class ProtobufRoundTripTests: SingleClusterSystemXCTestCase { // MARK: Handshake protocol func test_roundTrip_Wire_HandshakeOffer() throws { - let offer = Wire.HandshakeOffer(version: .init(reserved: 2, major: 3, minor: 5, patch: 5), originNode: self.node, targetNode: self.node.node) + let offer = Wire.HandshakeOffer(version: .init(reserved: 2, major: 3, minor: 5, patch: 5), originNode: self.node, targetEndpoint: self.node.endpoint) let proto = _ProtoHandshakeOffer(offer) let back = try Wire.HandshakeOffer(fromProto: proto) back.shouldEqual(offer) diff --git a/Tests/DistributedClusterTests/Cluster/Reception/OpLogDistributedReceptionistClusteredTests.swift b/Tests/DistributedClusterTests/Cluster/Reception/OpLogDistributedReceptionistClusteredTests.swift index a03e479f9..9772c5807 100644 --- a/Tests/DistributedClusterTests/Cluster/Reception/OpLogDistributedReceptionistClusteredTests.swift +++ b/Tests/DistributedClusterTests/Cluster/Reception/OpLogDistributedReceptionistClusteredTests.swift @@ -146,8 +146,8 @@ final class OpLogDistributedReceptionistClusteredTests: ClusteredActorSystemsXCT await local.receptionist.checkIn(forwarder, with: key) // Join the nodes - local.cluster.join(node: remote.cluster.uniqueNode.node) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + local.cluster.join(endpoint: remote.cluster.node.endpoint) + try assertAssociated(local, withExactly: remote.settings.bindNode) // The remote node discovers the actor try await Task { @@ -202,7 +202,7 @@ final class OpLogDistributedReceptionistClusteredTests: ClusteredActorSystemsXCT // remote._receptionist.subscribe(remoteLookupProbe.ref, to: key) // _ = try remoteLookupProbe.expectMessage() // -// local.cluster.join(node: remote.cluster.uniqueNode.node) +// local.cluster.join(endpoint: remote.cluster.node.endpoint) // try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) // // let localListing = try localLookupProbe.expectMessage() @@ -242,7 +242,7 @@ final class OpLogDistributedReceptionistClusteredTests: ClusteredActorSystemsXCT // second._receptionist.subscribe(remoteLookupProbe.ref, to: key) // _ = try remoteLookupProbe.expectMessage() // -// first.cluster.join(node: second.cluster.uniqueNode.node) +// first.cluster.join(endpoint: second.cluster.node.endpoint) // try assertAssociated(first, withExactly: second.settings.uniqueBindNode) // // try 
remoteLookupProbe.eventuallyExpectListing(expected: [refA, refB], within: .seconds(3)) @@ -270,7 +270,7 @@ final class OpLogDistributedReceptionistClusteredTests: ClusteredActorSystemsXCT // let (first, second) = setUpPair { // $0.receptionist.ackPullReplicationIntervalSlow = .milliseconds(200) // } -// first.cluster.join(node: second.cluster.uniqueNode.node) +// first.cluster.join(endpoint: second.cluster.node.endpoint) // try assertAssociated(first, withExactly: second.settings.uniqueBindNode) // // let firstKey = _Reception.Key(_ActorRef.self, id: "first") @@ -313,7 +313,7 @@ final class OpLogDistributedReceptionistClusteredTests: ClusteredActorSystemsXCT // let (first, second) = setUpPair { // $0.receptionist.ackPullReplicationIntervalSlow = .milliseconds(200) // } -// first.cluster.join(node: second.cluster.uniqueNode.node) +// first.cluster.join(endpoint: second.cluster.node.endpoint) // try assertAssociated(first, withExactly: second.settings.uniqueBindNode) // // let key = _Reception.Key(_ActorRef.self, id: "key") @@ -345,7 +345,7 @@ final class OpLogDistributedReceptionistClusteredTests: ClusteredActorSystemsXCT // let (first, second) = setUpPair { // $0.receptionist.ackPullReplicationIntervalSlow = .milliseconds(200) // } -// first.cluster.join(node: second.cluster.uniqueNode.node) +// first.cluster.join(endpoint: second.cluster.node.endpoint) // try assertAssociated(first, withExactly: second.settings.uniqueBindNode) // // let key = _Reception.Key(_ActorRef.self, id: "key") @@ -385,7 +385,7 @@ final class OpLogDistributedReceptionistClusteredTests: ClusteredActorSystemsXCT // let (first, second) = setUpPair { // $0.receptionist.ackPullReplicationIntervalSlow = .milliseconds(200) // } -// first.cluster.join(node: second.cluster.uniqueNode.node) +// first.cluster.join(endpoint: second.cluster.node.endpoint) // try assertAssociated(first, withExactly: second.settings.uniqueBindNode) // // let key = _Reception.Key(_ActorRef.self, id: "first") diff --git a/Tests/DistributedClusterTests/Cluster/Reception/_OpLogClusterReceptionistClusteredTests.swift b/Tests/DistributedClusterTests/Cluster/Reception/_OpLogClusterReceptionistClusteredTests.swift index 0f311a8a7..217ed87ee 100644 --- a/Tests/DistributedClusterTests/Cluster/Reception/_OpLogClusterReceptionistClusteredTests.swift +++ b/Tests/DistributedClusterTests/Cluster/Reception/_OpLogClusterReceptionistClusteredTests.swift @@ -106,8 +106,8 @@ final class _OpLogClusterReceptionistClusteredTests: ClusteredActorSystemsXCTest local._receptionist.register(ref, with: key, replyTo: registeredProbe.ref) _ = try registeredProbe.expectMessage() - local.cluster.join(node: remote.cluster.uniqueNode.node) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + local.cluster.join(endpoint: remote.cluster.node.endpoint) + try assertAssociated(local, withExactly: remote.settings.bindNode) let listing = try lookupProbe.expectMessage() listing.refs.count.shouldEqual(1) @@ -157,8 +157,8 @@ final class _OpLogClusterReceptionistClusteredTests: ClusteredActorSystemsXCTest remote._receptionist.subscribe(remoteLookupProbe.ref, to: key) _ = try remoteLookupProbe.expectMessage() - local.cluster.join(node: remote.cluster.uniqueNode.node) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + local.cluster.join(endpoint: remote.cluster.node.endpoint) + try assertAssociated(local, withExactly: remote.settings.bindNode) let localListing = try localLookupProbe.expectMessage() localListing.refs.count.shouldEqual(4) @@ 
-197,8 +197,8 @@ final class _OpLogClusterReceptionistClusteredTests: ClusteredActorSystemsXCTest second._receptionist.subscribe(remoteLookupProbe.ref, to: key) _ = try remoteLookupProbe.expectMessage() - first.cluster.join(node: second.cluster.uniqueNode.node) - try assertAssociated(first, withExactly: second.settings.uniqueBindNode) + first.cluster.join(endpoint: second.cluster.node.endpoint) + try assertAssociated(first, withExactly: second.settings.bindNode) try remoteLookupProbe.eventuallyExpectListing(expected: [refA, refB], within: .seconds(3)) @@ -225,8 +225,8 @@ final class _OpLogClusterReceptionistClusteredTests: ClusteredActorSystemsXCTest let (first, second) = await setUpPair { $0.receptionist.ackPullReplicationIntervalSlow = .milliseconds(200) } - first.cluster.join(node: second.cluster.uniqueNode.node) - try assertAssociated(first, withExactly: second.settings.uniqueBindNode) + first.cluster.join(endpoint: second.cluster.node.endpoint) + try assertAssociated(first, withExactly: second.settings.bindNode) let firstKey = _Reception.Key(_ActorRef.self, id: "first") let extraKey = _Reception.Key(_ActorRef.self, id: "extra") @@ -268,8 +268,8 @@ final class _OpLogClusterReceptionistClusteredTests: ClusteredActorSystemsXCTest let (first, second) = await setUpPair { $0.receptionist.ackPullReplicationIntervalSlow = .milliseconds(200) } - first.cluster.join(node: second.cluster.uniqueNode.node) - try assertAssociated(first, withExactly: second.settings.uniqueBindNode) + first.cluster.join(endpoint: second.cluster.node.endpoint) + try assertAssociated(first, withExactly: second.settings.bindNode) let key = _Reception.Key(_ActorRef.self, id: "key") @@ -300,8 +300,8 @@ final class _OpLogClusterReceptionistClusteredTests: ClusteredActorSystemsXCTest let (first, second) = await setUpPair { $0.receptionist.ackPullReplicationIntervalSlow = .milliseconds(200) } - first.cluster.join(node: second.cluster.uniqueNode.node) - try assertAssociated(first, withExactly: second.settings.uniqueBindNode) + first.cluster.join(endpoint: second.cluster.node.endpoint) + try assertAssociated(first, withExactly: second.settings.bindNode) let key = _Reception.Key(_ActorRef.self, id: "key") @@ -340,8 +340,8 @@ final class _OpLogClusterReceptionistClusteredTests: ClusteredActorSystemsXCTest let (first, second) = await setUpPair { $0.receptionist.ackPullReplicationIntervalSlow = .milliseconds(200) } - first.cluster.join(node: second.cluster.uniqueNode.node) - try assertAssociated(first, withExactly: second.settings.uniqueBindNode) + first.cluster.join(endpoint: second.cluster.node.endpoint) + try assertAssociated(first, withExactly: second.settings.bindNode) let key = _Reception.Key(_ActorRef.self, id: "first") diff --git a/Tests/DistributedClusterTests/Cluster/RemoteActorRefProviderTests.swift b/Tests/DistributedClusterTests/Cluster/RemoteActorRefProviderTests.swift index 8a775a7b9..09d543c95 100644 --- a/Tests/DistributedClusterTests/Cluster/RemoteActorRefProviderTests.swift +++ b/Tests/DistributedClusterTests/Cluster/RemoteActorRefProviderTests.swift @@ -24,8 +24,8 @@ final class RemoteActorRefProviderTests: SingleClusterSystemXCTestCase { } } - let localNode = UniqueNode(systemName: "RemoteAssociationTests", host: "127.0.0.1", port: 7111, nid: UniqueNodeID(777_777)) - let remoteNode = UniqueNode(systemName: "RemoteAssociationTests", host: "127.0.0.1", port: 9559, nid: UniqueNodeID(888_888)) + let localNode = Cluster.Node(systemName: "RemoteAssociationTests", host: "127.0.0.1", port: 7111, nid: 
Cluster.Node.ID(777_777)) + let remoteNode = Cluster.Node(systemName: "RemoteAssociationTests", host: "127.0.0.1", port: 9559, nid: Cluster.Node.ID(888_888)) lazy var remoteAddress = ActorID(remote: remoteNode, path: try! ActorPath._user.appending("henry").appending("hacker"), incarnation: .random()) // ==== ---------------------------------------------------------------------------------------------------------------- @@ -33,17 +33,17 @@ final class RemoteActorRefProviderTests: SingleClusterSystemXCTestCase { func test_remoteActorRefProvider_shouldMakeRemoteRef_givenSomeRemotePath() throws { // given - let theOne = TheOneWhoHasNoParent(local: system.cluster.uniqueNode) - let guardian = _Guardian(parent: theOne, name: "user", localNode: system.cluster.uniqueNode, system: system) + let theOne = TheOneWhoHasNoParent(local: system.cluster.node) + let guardian = _Guardian(parent: theOne, name: "user", localNode: system.cluster.node, system: system) let localProvider = LocalActorRefProvider(root: guardian) var settings = ClusterSystemSettings(name: "\(Self.self)") - settings.node = self.localNode.node + settings.endpoint = self.localNode.endpoint settings.nid = self.localNode.nid let clusterShell = ClusterShell(settings: settings) let provider = RemoteActorRefProvider(settings: system.settings, cluster: clusterShell, localProvider: localProvider) - let node = UniqueNode(node: .init(systemName: "system", host: "3.3.3.3", port: 2322), nid: .random()) + let node = Cluster.Node(endpoint: .init(systemName: "system", host: "3.3.3.3", port: 2322), nid: .random()) let remoteNode = ActorID(remote: node, path: try ActorPath._user.appending("henry").appending("hacker"), incarnation: ActorIncarnation(1337)) let resolveContext = _ResolveContext(id: remoteNode, system: system) @@ -68,7 +68,7 @@ final class RemoteActorRefProviderTests: SingleClusterSystemXCTestCase { func test_remoteActorRefProvider_shouldResolveDeadRef_forTypeMismatchOfActorAndResolveContext() throws { let ref: _ActorRef = try system._spawn("ignoresStrings", .stop) var id: ActorID = ref.id - id._location = .remote(self.system.settings.uniqueBindNode) + id._location = .remote(self.system.settings.bindNode) let resolveContext = _ResolveContext(id: id, system: system) let resolvedRef = self.system._resolve(context: resolveContext) @@ -79,7 +79,7 @@ final class RemoteActorRefProviderTests: SingleClusterSystemXCTestCase { func test_remoteActorRefProvider_shouldResolveSameAsLocalNodeDeadLettersRef_forTypeMismatchOfActorAndResolveContext() throws { let ref: _ActorRef = self.system.deadLetters var id: ActorID = ref.id - id._location = .remote(self.system.settings.uniqueBindNode) + id._location = .remote(self.system.settings.bindNode) let resolveContext = _ResolveContext(id: id, system: system) let resolvedRef = self.system._resolve(context: resolveContext) @@ -90,7 +90,7 @@ final class RemoteActorRefProviderTests: SingleClusterSystemXCTestCase { func test_remoteActorRefProvider_shouldResolveRemoteDeadLettersRef_forTypeMismatchOfActorAndResolveContext() throws { let ref: _ActorRef = self.system.deadLetters var id: ActorID = ref.id - let unknownNode = UniqueNode(node: .init(systemName: "something", host: "1.1.1.1", port: 1111), nid: UniqueNodeID(1211)) + let unknownNode = Cluster.Node(endpoint: .init(systemName: "something", host: "1.1.1.1", port: 1111), nid: Cluster.Node.ID(1211)) id._location = .remote(unknownNode) let resolveContext = _ResolveContext(id: id, system: system) @@ -100,7 +100,7 @@ final class RemoteActorRefProviderTests: 
SingleClusterSystemXCTestCase { } func test_remoteActorRefProvider_shouldResolveRemoteAlreadyDeadRef_forTypeMismatchOfActorAndResolveContext() throws { - let unknownNode = UniqueNode(node: .init(systemName: "something", host: "1.1.1.1", port: 1111), nid: UniqueNodeID(1211)) + let unknownNode = Cluster.Node(endpoint: .init(systemName: "something", host: "1.1.1.1", port: 1111), nid: Cluster.Node.ID(1211)) let id: ActorID = try .init(remote: unknownNode, path: ActorPath._dead.appending("already"), incarnation: .wellKnown) let resolveContext = _ResolveContext(id: id, system: system) @@ -113,7 +113,7 @@ final class RemoteActorRefProviderTests: SingleClusterSystemXCTestCase { let ref: _ActorRef = self.system.deadLetters.adapt(from: String.self) var id: ActorID = ref.id - id._location = .remote(self.system.settings.uniqueBindNode) + id._location = .remote(self.system.settings.bindNode) let resolveContext = _ResolveContext(id: id, system: system) let resolvedRef = self.system._resolve(context: resolveContext) diff --git a/Tests/DistributedClusterTests/Cluster/RemoteMessagingClusteredTests.swift b/Tests/DistributedClusterTests/Cluster/RemoteMessagingClusteredTests.swift index 7670730f2..ad47d921c 100644 --- a/Tests/DistributedClusterTests/Cluster/RemoteMessagingClusteredTests.swift +++ b/Tests/DistributedClusterTests/Cluster/RemoteMessagingClusteredTests.swift @@ -47,9 +47,9 @@ final class RemoteMessagingClusteredTests: ClusteredActorSystemsXCTestCase { } ) - local.cluster.join(node: remote.cluster.uniqueNode.node) + local.cluster.join(endpoint: remote.cluster.node.endpoint) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + try assertAssociated(local, withExactly: remote.settings.bindNode) let nonCodableResolvedRef = self.resolveRef(local, type: SerializationTestMessage.self, id: nonCodableRefOnRemoteSystem.id, on: remote) nonCodableResolvedRef.tell(SerializationTestMessage(serializationBehavior: .succeed)) @@ -78,9 +78,9 @@ final class RemoteMessagingClusteredTests: ClusteredActorSystemsXCTestCase { } ) - local.cluster.join(node: remote.cluster.uniqueNode.node) + local.cluster.join(endpoint: remote.cluster.node.endpoint) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + try assertAssociated(local, withExactly: remote.settings.bindNode) let nonCodableResolvedRef = self.resolveRef(local, type: SerializationTestMessage.self, id: refOnRemoteSystem.id, on: remote) nonCodableResolvedRef.tell(SerializationTestMessage(serializationBehavior: .failEncoding)) @@ -107,9 +107,9 @@ final class RemoteMessagingClusteredTests: ClusteredActorSystemsXCTestCase { } ) - local.cluster.join(node: remote.cluster.uniqueNode.node) + local.cluster.join(endpoint: remote.cluster.node.endpoint) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + try assertAssociated(local, withExactly: remote.settings.bindNode) let nonCodableResolvedRef = self.resolveRef(local, type: SerializationTestMessage.self, id: nonCodableRefOnRemoteSystem.id, on: remote) nonCodableResolvedRef.tell(SerializationTestMessage(serializationBehavior: .failDecoding)) @@ -168,9 +168,9 @@ final class RemoteMessagingClusteredTests: ClusteredActorSystemsXCTestCase { } ) - local.cluster.join(node: remote.cluster.uniqueNode.node) + local.cluster.join(endpoint: remote.cluster.node.endpoint) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + try assertAssociated(local, withExactly: remote.settings.bindNode) let remoteRef = self.resolveRef(local, type: 
EchoTestMessage.self, id: refOnRemoteSystem.id, on: remote) remoteRef.tell(EchoTestMessage(string: "test", respondTo: localRef)) @@ -195,9 +195,9 @@ final class RemoteMessagingClusteredTests: ClusteredActorSystemsXCTestCase { } ) - local.cluster.join(node: remote.cluster.uniqueNode.node) + local.cluster.join(endpoint: remote.cluster.node.endpoint) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + try assertAssociated(local, withExactly: remote.settings.bindNode) let remoteRef = self.resolveRef(local, type: EchoTestMessage.self, id: refOnRemoteSystem.id, on: remote) @@ -238,9 +238,9 @@ final class RemoteMessagingClusteredTests: ClusteredActorSystemsXCTestCase { } ) - local.cluster.join(node: remote.cluster.uniqueNode.node) + local.cluster.join(endpoint: remote.cluster.node.endpoint) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + try assertAssociated(local, withExactly: remote.settings.bindNode) let remoteRef = self.resolveRef(local, type: EchoTestMessage.self, id: refOnRemoteSystem.id, on: remote) @@ -264,9 +264,9 @@ final class RemoteMessagingClusteredTests: ClusteredActorSystemsXCTestCase { settings.serialization.register(SerializationTestMessage.self) settings.serialization.register(EchoTestMessage.self) } - remote.cluster.join(node: local.cluster.uniqueNode.node) + remote.cluster.join(endpoint: local.cluster.node.endpoint) - try assertAssociated(local, withExactly: remote.cluster.uniqueNode) + try assertAssociated(local, withExactly: remote.cluster.node) let thirdSystem = await setUpNode("ClusterAssociationTests") { settings in settings.bindPort = 9119 @@ -275,9 +275,9 @@ final class RemoteMessagingClusteredTests: ClusteredActorSystemsXCTestCase { } defer { try! thirdSystem.shutdown().wait() } - thirdSystem.cluster.join(node: local.cluster.uniqueNode.node) - thirdSystem.cluster.join(node: remote.cluster.uniqueNode.node) - try assertAssociated(thirdSystem, withExactly: [local.cluster.uniqueNode, remote.cluster.uniqueNode]) + thirdSystem.cluster.join(endpoint: local.cluster.node.endpoint) + thirdSystem.cluster.join(endpoint: remote.cluster.node.endpoint) + try assertAssociated(thirdSystem, withExactly: [local.cluster.node, remote.cluster.node]) let thirdTestKit = ActorTestKit(thirdSystem) let localRef: _ActorRef = try local._spawn( diff --git a/Tests/DistributedClusterTests/Cluster/RemotingHandshakeStateMachineTests.swift b/Tests/DistributedClusterTests/Cluster/RemotingHandshakeStateMachineTests.swift index b3fc04f3f..40ec39f20 100644 --- a/Tests/DistributedClusterTests/Cluster/RemotingHandshakeStateMachineTests.swift +++ b/Tests/DistributedClusterTests/Cluster/RemotingHandshakeStateMachineTests.swift @@ -32,11 +32,11 @@ final class RemoteHandshakeStateMachineTests: XCTestCase { let serverAddress = serverKernel.selfNode let clientKernel = ClusterShellState.makeTestMock(side: .client) { settings in - settings.node.port = 2222 + settings.endpoint.port = 2222 } // client - let clientInitiated = HSM.InitiatedState(settings: clientKernel.settings, localNode: clientKernel.selfNode, connectTo: serverAddress.node) + let clientInitiated = HSM.InitiatedState(settings: clientKernel.settings, localNode: clientKernel.selfNode, connectTo: serverAddress.endpoint) let offer = clientInitiated.makeOffer() // server @@ -70,11 +70,11 @@ final class RemoteHandshakeStateMachineTests: XCTestCase { let serverAddress = serverKernel.selfNode let clientKernel = ClusterShellState.makeTestMock(side: .client) { settings in - settings.node.port = 2222 + 
settings.endpoint.port = 2222 settings._protocolVersion.patch += 1 } - let clientInitiated = HSM.InitiatedState(settings: clientKernel.settings, localNode: clientKernel.selfNode, connectTo: serverAddress.node) + let clientInitiated = HSM.InitiatedState(settings: clientKernel.settings, localNode: clientKernel.selfNode, connectTo: serverAddress.endpoint) let offer = clientInitiated.makeOffer() // server @@ -95,11 +95,11 @@ final class RemoteHandshakeStateMachineTests: XCTestCase { let serverAddress = serverKernel.selfNode let clientKernel = ClusterShellState.makeTestMock(side: .client) { settings in - settings.node.port = 2222 + settings.endpoint.port = 2222 settings._protocolVersion.major += 1 } - let clientInitiated = HSM.InitiatedState(settings: clientKernel.settings, localNode: clientKernel.selfNode, connectTo: serverAddress.node) + let clientInitiated = HSM.InitiatedState(settings: clientKernel.settings, localNode: clientKernel.selfNode, connectTo: serverAddress.endpoint) let offer = clientInitiated.makeOffer() // server @@ -126,11 +126,11 @@ final class RemoteHandshakeStateMachineTests: XCTestCase { let serverAddress = serverKernel.selfNode let clientKernel = ClusterShellState.makeTestMock(side: .client) { settings in - settings.node.port = 8228 + settings.endpoint.port = 8228 } // client - var clientInitiated = HSM.InitiatedState(settings: clientKernel.settings, localNode: clientKernel.selfNode, connectTo: serverAddress.node) + var clientInitiated = HSM.InitiatedState(settings: clientKernel.settings, localNode: clientKernel.selfNode, connectTo: serverAddress.endpoint) guard case .scheduleRetryHandshake = clientInitiated.onHandshakeTimeout() else { throw shouldNotHappen("Expected retry attempt after handshake timeout") diff --git a/Tests/DistributedClusterTests/Cluster/RemotingTLSClusteredTests.swift b/Tests/DistributedClusterTests/Cluster/RemotingTLSClusteredTests.swift index 5f922bfd5..b5f222000 100644 --- a/Tests/DistributedClusterTests/Cluster/RemotingTLSClusteredTests.swift +++ b/Tests/DistributedClusterTests/Cluster/RemotingTLSClusteredTests.swift @@ -190,7 +190,7 @@ class RemotingTLSTests: ClusteredActorSystemsXCTestCase { let testKeySource2: NIOSSLPrivateKeySource = .privateKey(try NIOSSLPrivateKey(bytes: [UInt8](testKey2.utf8), format: .pem)) let local = await setUpNode("local") { settings in - settings.node.host = "localhost" + settings.endpoint.host = "localhost" settings.tls = TLSConfiguration.makeServerConfiguration( certificateChain: [testCertificateSource1], privateKey: testKeySource1 @@ -203,7 +203,7 @@ class RemotingTLSTests: ClusteredActorSystemsXCTestCase { } let remote = await setUpNode("remote") { settings in - settings.node.host = "localhost" + settings.endpoint.host = "localhost" settings.tls = TLSConfiguration.makeServerConfiguration( certificateChain: [testCertificateSource2], privateKey: testKeySource2 @@ -215,9 +215,9 @@ class RemotingTLSTests: ClusteredActorSystemsXCTestCase { settings.tls?.trustRoots = .certificates([testCertificate1]) } - local.cluster.join(node: remote.cluster.uniqueNode.node) + local.cluster.join(endpoint: remote.cluster.node.endpoint) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + try assertAssociated(local, withExactly: remote.settings.bindNode) } // FIXME: Test Case '-[DistributedActorsTests.RemotingTLSTests test_boundServer_shouldFailWithSSLEnabledOnHostnameVerificationWithIP]' started. 
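The rename exercised throughout these test hunks is mechanical: the old `Node` (a protocol/host/port triple) becomes `Cluster.Endpoint`, and the old `UniqueNode` (an endpoint plus a unique `nid`) becomes `Cluster.Node`, with accessors following suit (`cluster.uniqueNode` -> `cluster.node`, `settings.node` -> `settings.endpoint`). A minimal Swift sketch of that relationship, using only initializers and accessors that appear in the hunks themselves (the host, port, and system names below are illustrative, not taken from the patch):

    import DistributedCluster

    // An endpoint only describes where a peer listens; it carries no identity.
    let endpoint = Cluster.Endpoint(systemName: "example", host: "127.0.0.1", port: 7337)

    // A node is an endpoint plus a unique id, so two incarnations bound to the
    // same endpoint (e.g. after a restart) remain distinguishable.
    let node = Cluster.Node(endpoint: endpoint, nid: .random())

    // Hence the shape of the renamed test calls: join by endpoint, assert by node.
    //   system.cluster.join(endpoint: other.cluster.node.endpoint)
    //   try assertAssociated(system, withExactly: other.cluster.node)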
@@ -228,7 +228,7 @@ class RemotingTLSTests: ClusteredActorSystemsXCTestCase { let testKey: NIOSSLPrivateKeySource = .privateKey(try NIOSSLPrivateKey(bytes: [UInt8](testKey1.utf8), format: .pem)) let local = await self.setUpNode("local") { settings in - settings.node.host = "127.0.0.1" + settings.endpoint.host = "127.0.0.1" settings.tls = TLSConfiguration.makeServerConfiguration( certificateChain: [testCertificateSource], privateKey: testKey @@ -241,7 +241,7 @@ class RemotingTLSTests: ClusteredActorSystemsXCTestCase { } let remote = await setUpNode("remote") { settings in - settings.node.host = "127.0.0.1" + settings.endpoint.host = "127.0.0.1" settings.tls = TLSConfiguration.makeServerConfiguration( certificateChain: [testCertificateSource], privateKey: testKey @@ -255,12 +255,12 @@ class RemotingTLSTests: ClusteredActorSystemsXCTestCase { let testKit = ActorTestKit(local) - local.cluster.join(node: remote.cluster.uniqueNode.node) + local.cluster.join(endpoint: remote.cluster.node.endpoint) sleep(2) do { - let pSystem = testKit.makeTestProbe(expecting: Set<UniqueNode>.self) + let pSystem = testKit.makeTestProbe(expecting: Set<Cluster.Node>.self) local.cluster.ref.tell(.query(.associatedNodes(pSystem.ref))) remote.cluster.ref.tell(.query(.associatedNodes(pSystem.ref))) let associatedNodes = try pSystem.expectMessage() } do { - let pRemote = testKit.makeTestProbe(expecting: Set<UniqueNode>.self) + let pRemote = testKit.makeTestProbe(expecting: Set<Cluster.Node>.self) local.cluster.ref.tell(.query(.associatedNodes(pRemote.ref))) // FIXME: We need to get the Accept back and act on it on the origin side remote.cluster.ref.tell(.query(.associatedNodes(pRemote.ref))) let associatedNodes = try pRemote.expectMessage() @@ -281,7 +281,7 @@ class RemotingTLSTests: ClusteredActorSystemsXCTestCase { let testCertificateSource: NIOSSLCertificateSource = .certificate(testCertificate) let testKey: NIOSSLPrivateKeySource = .privateKey(try NIOSSLPrivateKey(bytes: [UInt8](testKey1.utf8), format: .pem)) let local = await setUpNode("local") { settings in - settings.node.host = "localhost" + settings.endpoint.host = "localhost" settings.tls = TLSConfiguration.makeServerConfiguration( certificateChain: [testCertificateSource], privateKey: testKey @@ -294,7 +294,7 @@ class RemotingTLSTests: ClusteredActorSystemsXCTestCase { } let remote = await setUpNode("remote") { settings in - settings.node.host = "localhost" + settings.endpoint.host = "localhost" settings.tls = TLSConfiguration.makeServerConfiguration( certificateChain: [testCertificateSource], privateKey: testKey @@ -306,9 +306,9 @@ class RemotingTLSTests: ClusteredActorSystemsXCTestCase { settings.tls?.trustRoots = .certificates([testCertificate]) } - local.cluster.join(node: remote.cluster.uniqueNode.node) + local.cluster.join(endpoint: remote.cluster.node.endpoint) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + try assertAssociated(local, withExactly: remote.settings.bindNode) } func test_boundServer_shouldAcceptAssociateWithSSLEnabledAndCorrectPassphrase() async throws { @@ -352,8 +352,8 @@ class RemotingTLSTests: ClusteredActorSystemsXCTestCase { } } - local.cluster.join(node: remote.cluster.uniqueNode.node) + local.cluster.join(endpoint: remote.cluster.node.endpoint) - try assertAssociated(local, withExactly: remote.settings.uniqueBindNode) + try assertAssociated(local, withExactly: remote.settings.bindNode) } } diff --git a/Tests/DistributedClusterTests/Cluster/SWIM/SWIMActorClusteredTests.swift
b/Tests/DistributedClusterTests/Cluster/SWIM/SWIMActorClusteredTests.swift index 7c882e1c4..be8f0a808 100644 --- a/Tests/DistributedClusterTests/Cluster/SWIM/SWIMActorClusteredTests.swift +++ b/Tests/DistributedClusterTests/Cluster/SWIM/SWIMActorClusteredTests.swift @@ -65,8 +65,8 @@ final class SWIMActorClusteredTests: ClusteredActorSystemsXCTestCase { } let secondNode = await self.setUpSecond() - firstNode.cluster.join(node: secondNode.cluster.uniqueNode) - try assertAssociated(firstNode, withExactly: secondNode.cluster.uniqueNode) + firstNode.cluster.join(endpoint: secondNode.cluster.endpoint) + try assertAssociated(firstNode, withExactly: secondNode.cluster.node) guard let first = firstNode._cluster?._swimShell else { throw testKit(firstNode).fail("SWIM shell of [\(firstNode)] should not be nil") @@ -118,9 +118,9 @@ final class SWIMActorClusteredTests: ClusteredActorSystemsXCTestCase { let secondNode = await self.setUpSecond() let thirdNode = await self.setUpThird() - firstNode.cluster.join(node: secondNode.cluster.uniqueNode.node) - thirdNode.cluster.join(node: secondNode.cluster.uniqueNode.node) - try assertAssociated(firstNode, withExactly: secondNode.cluster.uniqueNode) + firstNode.cluster.join(endpoint: secondNode.cluster.node.endpoint) + thirdNode.cluster.join(endpoint: secondNode.cluster.node.endpoint) + try assertAssociated(firstNode, withExactly: secondNode.cluster.node) guard let first = firstNode._cluster?._swimShell else { throw testKit(firstNode).fail("SWIM shell of [\(firstNode)] should not be nil") @@ -137,7 +137,7 @@ final class SWIMActorClusteredTests: ClusteredActorSystemsXCTestCase { // FIXME: use a non-responsive test probe instead of real system // Down the node so it doesn't respond to ping try thirdNode.shutdown() - try await self.ensureNodes(.removed, on: secondNode, nodes: thirdNode.cluster.uniqueNode) + try await self.ensureNodes(.removed, on: secondNode, nodes: thirdNode.cluster.node) let originPeer = try SWIMActor.resolve(id: first.id._asRemote, using: secondNode) let targetPeer = try SWIMActor.resolve(id: third.id._asRemote, using: secondNode) @@ -154,9 +154,9 @@ final class SWIMActorClusteredTests: ClusteredActorSystemsXCTestCase { let secondNode = await self.setUpSecond() let thirdNode = await self.setUpThird() - firstNode.cluster.join(node: secondNode.cluster.uniqueNode.node) - thirdNode.cluster.join(node: secondNode.cluster.uniqueNode.node) - try assertAssociated(firstNode, withExactly: secondNode.cluster.uniqueNode) + firstNode.cluster.join(endpoint: secondNode.cluster.node.endpoint) + thirdNode.cluster.join(endpoint: secondNode.cluster.node.endpoint) + try assertAssociated(firstNode, withExactly: secondNode.cluster.node) guard let first = firstNode._cluster?._swimShell else { throw testKit(firstNode).fail("SWIM shell of [\(firstNode)] should not be nil") @@ -229,8 +229,8 @@ final class SWIMActorClusteredTests: ClusteredActorSystemsXCTestCase { let firstNode = await self.setUpFirst() let secondNode = await self.setUpSecond() - firstNode.cluster.join(node: secondNode.cluster.uniqueNode.node) - try assertAssociated(firstNode, withExactly: secondNode.cluster.uniqueNode) + firstNode.cluster.join(endpoint: secondNode.cluster.node.endpoint) + try assertAssociated(firstNode, withExactly: secondNode.cluster.node) guard let first = firstNode._cluster?._swimShell else { throw testKit(firstNode).fail("SWIM shell of [\(firstNode)] should not be nil") @@ -311,10 +311,10 @@ final class SWIMActorClusteredTests: ClusteredActorSystemsXCTestCase { let secondNode = await 
self.setUpSecond() let thirdNode = await self.setUpThird() - firstNode.cluster.join(node: secondNode.cluster.uniqueNode.node) - thirdNode.cluster.join(node: secondNode.cluster.uniqueNode.node) - try assertAssociated(firstNode, withExactly: [secondNode.cluster.uniqueNode, thirdNode.cluster.uniqueNode]) - try assertAssociated(secondNode, withExactly: [firstNode.cluster.uniqueNode, thirdNode.cluster.uniqueNode]) + firstNode.cluster.join(endpoint: secondNode.cluster.node.endpoint) + thirdNode.cluster.join(endpoint: secondNode.cluster.node.endpoint) + try assertAssociated(firstNode, withExactly: [secondNode.cluster.node, thirdNode.cluster.node]) + try assertAssociated(secondNode, withExactly: [firstNode.cluster.node, thirdNode.cluster.node]) guard let first = firstNode._cluster?._swimShell else { throw testKit(firstNode).fail("SWIM shell of [\(firstNode)] should not be nil") @@ -371,9 +371,9 @@ final class SWIMActorClusteredTests: ClusteredActorSystemsXCTestCase { let firstNode = await self.setUpFirst() let secondNode = await self.setUpSecond() - firstNode.cluster.join(node: secondNode.cluster.uniqueNode.node) - try assertAssociated(firstNode, withExactly: secondNode.cluster.uniqueNode) - try assertAssociated(secondNode, withExactly: firstNode.cluster.uniqueNode) + firstNode.cluster.join(endpoint: secondNode.cluster.node.endpoint) + try assertAssociated(firstNode, withExactly: secondNode.cluster.node) + try assertAssociated(secondNode, withExactly: firstNode.cluster.node) guard let first = firstNode._cluster?._swimShell else { throw testKit(firstNode).fail("SWIM shell of [\(firstNode)] should not be nil") @@ -400,7 +400,7 @@ final class SWIMActorClusteredTests: ClusteredActorSystemsXCTestCase { let localNode = await self.setUpFirst() let remoteNode = await self.setUpSecond() - localNode.cluster.join(node: remoteNode.cluster.uniqueNode.node) + localNode.cluster.join(endpoint: remoteNode.cluster.node.endpoint) guard let local = localNode._cluster?._swimShell else { throw testKit(localNode).fail("SWIM shell of [\(localNode)] should not be nil") diff --git a/Tests/DistributedClusterTests/Cluster/ShootTheOtherNodeClusteredTests.swift b/Tests/DistributedClusterTests/Cluster/ShootTheOtherNodeClusteredTests.swift index e4593d892..9531d9d4a 100644 --- a/Tests/DistributedClusterTests/Cluster/ShootTheOtherNodeClusteredTests.swift +++ b/Tests/DistributedClusterTests/Cluster/ShootTheOtherNodeClusteredTests.swift @@ -34,9 +34,9 @@ final class ShootTheOtherNodeClusteredTests: ClusteredActorSystemsXCTestCase { // also assures they are associated try await self.joinNodes(node: local, with: remote, ensureWithin: .seconds(5), ensureMembers: .up) - let remoteAssociationControlState0 = local._cluster!.getEnsureAssociation(with: remote.cluster.uniqueNode) + let remoteAssociationControlState0 = local._cluster!.getEnsureAssociation(with: remote.cluster.node) guard case ClusterShell.StoredAssociationState.association(let remoteControl0) = remoteAssociationControlState0 else { - throw Boom("Expected the association to exist for \(remote.cluster.uniqueNode)") + throw Boom("Expected the association to exist for \(remote.cluster.node)") } ClusterShell.shootTheOtherNodeAndCloseConnection(system: local, targetNodeAssociation: remoteControl0) diff --git a/Tests/DistributedClusterTests/Cluster/SystemMessagesRedeliveryTests.swift b/Tests/DistributedClusterTests/Cluster/SystemMessagesRedeliveryTests.swift index ad257f739..956ad8178 100644 --- a/Tests/DistributedClusterTests/Cluster/SystemMessagesRedeliveryTests.swift +++ 
b/Tests/DistributedClusterTests/Cluster/SystemMessagesRedeliveryTests.swift @@ -25,7 +25,7 @@ final class SystemMessagesRedeliveryTests: SingleClusterSystemXCTestCase { let outbound = OutboundSystemMessageRedelivery() for i in 1 ... (outbound.settings.redeliveryBatchSize + 5) { - switch outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) { + switch outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) { case .send(let envelope): envelope.sequenceNr.shouldEqual(self.seqNr(i)) @@ -38,9 +38,9 @@ final class SystemMessagesRedeliveryTests: SingleClusterSystemXCTestCase { func test_sysMsg_outbound_ack_shouldCumulativelyAcknowledge() { let outbound = OutboundSystemMessageRedelivery() - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 1 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 2 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 3 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 1 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 2 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 3 outbound.messagesPendingAcknowledgement.count.shouldEqual(3) @@ -55,9 +55,9 @@ final class SystemMessagesRedeliveryTests: SingleClusterSystemXCTestCase { func test_sysMsg_outbound_ack_shouldIgnoreDuplicateACK() { let outbound = OutboundSystemMessageRedelivery() - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 1 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 2 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 3 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 1 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 2 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 3 let res1 = outbound.acknowledge(self.ack(2)) guard case .acknowledged = res1 else { @@ -75,9 +75,9 @@ final class SystemMessagesRedeliveryTests: SingleClusterSystemXCTestCase { func test_sysMsg_outbound_ack_shouldRejectACKAboutFutureSeqNrs() { let outbound = OutboundSystemMessageRedelivery() - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 1 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 2 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 3 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 1 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 2 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 3 let res = outbound.acknowledge(self.ack(4)) // 4 was not sent yet (!) 
guard case .ackWasForFutureSequenceNr(let highestKnownSeqNr) = res else { @@ -92,13 +92,13 @@ final class SystemMessagesRedeliveryTests: SingleClusterSystemXCTestCase { func test_sysMsg_outbound_ack_thenOfferMore_shouldContinueAtRightSequenceNr() { let outbound = OutboundSystemMessageRedelivery() - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 1 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 2 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 3 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 1 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 2 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 3 _ = outbound.acknowledge(self.ack(1)) - switch outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) { + switch outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) { case .send(let envelope): envelope.sequenceNr.shouldEqual(SystemMessageEnvelope.SequenceNr(4)) // continue from where we left off case let other: @@ -107,7 +107,7 @@ final class SystemMessagesRedeliveryTests: SingleClusterSystemXCTestCase { _ = outbound.acknowledge(self.ack(4)) - switch outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) { + switch outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) { case .send(let envelope): envelope.sequenceNr.shouldEqual(SystemMessageEnvelope.SequenceNr(5)) // continue from where we left off case let other: @@ -118,9 +118,9 @@ final class SystemMessagesRedeliveryTests: SingleClusterSystemXCTestCase { func test_sysMsg_outbound_nack_shouldCauseAppropriateRedelivery() { let outbound = OutboundSystemMessageRedelivery() - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 1 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 2 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 3 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 1 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 2 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 3 let res = outbound.negativeAcknowledge(self.nack(1)) // we saw 3 but not 2 guard case .ensureRedeliveryTick = res else { @@ -134,9 +134,9 @@ final class SystemMessagesRedeliveryTests: SingleClusterSystemXCTestCase { func test_sysMsg_outbound_redeliveryTick_shouldRedeliverPendingMessages() { let outbound = OutboundSystemMessageRedelivery() - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 1 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 2 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 3 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 1 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 2 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 3 // none are ACKed switch outbound.onRedeliveryTick() { @@ -154,7 +154,7 @@ final class SystemMessagesRedeliveryTests: SingleClusterSystemXCTestCase { XCTFail("Expected [.redeliver], was: 
\(other)") } - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 4 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 4 switch outbound.onRedeliveryTick() { case .redeliver(let envelopes, _): envelopes.count.shouldEqual(2) @@ -168,13 +168,13 @@ final class SystemMessagesRedeliveryTests: SingleClusterSystemXCTestCase { settings.redeliveryBatchSize = 3 let outbound = OutboundSystemMessageRedelivery(settings: settings) - _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 111, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 1 - _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 222, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 2 - _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 333, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 3 - _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 444, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 4 - _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 555, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 5 - _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 666, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 6 - _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 777, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 7 + _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 111, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.node)) // 1 + _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 222, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.node)) // 2 + _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 333, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.node)) // 3 + _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 444, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.node)) // 4 + _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 555, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.node)) // 5 + _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 666, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.node)) // 6 + _ = outbound.offer(.nodeTerminated(.init(systemName: "S", host: "127.0.0.1", port: 777, nid: .random())), recipient: ._deadLetters(on: self.system.cluster.node)) // 7 // none are ACKed switch outbound.onRedeliveryTick() { @@ -193,14 +193,14 @@ final class SystemMessagesRedeliveryTests: SingleClusterSystemXCTestCase { settings.redeliveryBufferLimit = 5 let outbound = OutboundSystemMessageRedelivery(settings: settings) - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // 1 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // 1 _ = outbound.acknowledge(.init(sequenceNr: 1)) - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // buffered: 1 - _ = outbound.offer(.start, recipient: 
._deadLetters(on: self.system.cluster.uniqueNode)) // buffered: 2 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // buffered: 3 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // buffered: 4 - _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // buffered: 5 - let res = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.uniqueNode)) // buffered: 6; oh oh! we'd be over 5 buffered + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // buffered: 1 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // buffered: 2 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // buffered: 3 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // buffered: 4 + _ = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // buffered: 5 + let res = outbound.offer(.start, recipient: ._deadLetters(on: self.system.cluster.node)) // buffered: 6; oh oh! we'd be over 5 buffered guard case .bufferOverflowMustAbortAssociation(let limit) = res else { XCTFail("Expected [.bufferOverflowMustAbortAssociation], was: [\(res)]") diff --git a/Tests/DistributedClusterTests/Cluster/TestExtensions+MembershipDSL.swift b/Tests/DistributedClusterTests/Cluster/TestExtensions+MembershipDSL.swift index f0cc16927..4055eb582 100644 --- a/Tests/DistributedClusterTests/Cluster/TestExtensions+MembershipDSL.swift +++ b/Tests/DistributedClusterTests/Cluster/TestExtensions+MembershipDSL.swift @@ -21,7 +21,7 @@ import NIO extension Cluster.MembershipGossip { /// First line is Membership DSL, followed by lines of the SeenTable DSL - static func parse(_ dsl: String, owner: UniqueNode, nodes: [UniqueNode]) -> Cluster.MembershipGossip { + static func parse(_ dsl: String, owner: Cluster.Node, nodes: [Cluster.Node]) -> Cluster.MembershipGossip { let dslLines = dsl.split(separator: "\n") var gossip = Cluster.MembershipGossip(ownerNode: owner) gossip.membership = Cluster.Membership.parse(String(dslLines.first!), nodes: nodes) @@ -33,10 +33,10 @@ extension Cluster.MembershipGossip { extension Cluster.MembershipGossip.SeenTable { /// Express seen tables using a DSL /// Syntax: each line: `<node>: <node>@<version>*` - static func parse(_ dslString: String, nodes: [UniqueNode], file: StaticString = #file, line: UInt = #line) -> Cluster.MembershipGossip.SeenTable { + static func parse(_ dslString: String, nodes: [Cluster.Node], file: StaticString = #file, line: UInt = #line) -> Cluster.MembershipGossip.SeenTable { let lines = dslString.split(separator: "\n") - func nodeById(id: String.SubSequence) -> UniqueNode { - if let found = nodes.first(where: { $0.node.systemName.contains(id) }) { + func nodeById(id: String.SubSequence) -> Cluster.Node { + if let found = nodes.first(where: { $0.systemName.contains(id) }) { return found } else { fatalError("Could not find node containing [\(id)] in \(nodes), for seen table: \(dslString)", file: file, line: line) @@ -60,7 +60,7 @@ extension Cluster.MembershipGossip.SeenTable { let versionString = parts.dropFirst().first! let atVersion = UInt64(versionString)! 
- vv.state[.uniqueNode(atNode)] = atVersion + vv.state[.node(atNode)] = atVersion } table.underlying[on] = vv @@ -71,9 +71,9 @@ extension Cluster.MembershipGossip.SeenTable { } extension VersionVector { - static func parse(_ dslString: String, nodes: [UniqueNode], file: StaticString = #file, line: UInt = #line) -> VersionVector { - func nodeById(id: String.SubSequence) -> UniqueNode { - if let found = nodes.first(where: { $0.node.systemName.contains(id) }) { + static func parse(_ dslString: String, nodes: [Cluster.Node], file: StaticString = #file, line: UInt = #line) -> VersionVector { + func nodeById(id: String.SubSequence) -> Cluster.Node { + if let found = nodes.first(where: { $0.systemName.contains(id) }) { return found } else { fatalError("Could not find node containing [\(id)] in \(nodes), for seen table: \(dslString)", file: file, line: line) @@ -82,7 +82,7 @@ extension VersionVector { let replicaVersions: [VersionVector.ReplicaVersion] = dslString.split(separator: " ").map { segment in let v = segment.split { c in ":@".contains(c) } - return (.uniqueNode(nodeById(id: v.first!)), VersionVector.Version(v.dropFirst().first!)!) + return (.node(nodeById(id: v.first!)), VersionVector.Version(v.dropFirst().first!)!) } return VersionVector(replicaVersions) } @@ -96,9 +96,9 @@ extension Cluster.Membership { /// ``` /// [.:] || [leader:] /// ``` - static func parse(_ dslString: String, nodes: [UniqueNode], file: StaticString = #file, line: UInt = #line) -> Cluster.Membership { - func nodeById(id: String.SubSequence) -> UniqueNode { - if let found = nodes.first(where: { $0.node.systemName.contains(id) }) { + static func parse(_ dslString: String, nodes: [Cluster.Node], file: StaticString = #file, line: UInt = #line) -> Cluster.Membership { + func nodeById(id: String.SubSequence) -> Cluster.Node { + if let found = nodes.first(where: { $0.systemName.contains(id) }) { return found } else { fatalError("Could not find node containing [\(id)] in \(nodes), for seen table: \(dslString)", file: file, line: line) diff --git a/Tests/DistributedClusterTests/Cluster/TestExtensions.swift b/Tests/DistributedClusterTests/Cluster/TestExtensions.swift index 4302dbade..66d85c04c 100644 --- a/Tests/DistributedClusterTests/Cluster/TestExtensions.swift +++ b/Tests/DistributedClusterTests/Cluster/TestExtensions.swift @@ -25,7 +25,7 @@ enum HandshakeSide: String { extension ClusterShellState { static func makeTestMock(side: HandshakeSide, configureSettings: (inout ClusterSystemSettings) -> Void = { _ in () }) -> ClusterShellState { var settings = ClusterSystemSettings( - node: Node( + endpoint: Cluster.Endpoint( systemName: "MockSystem", host: "127.0.0.1", port: 7337 @@ -34,7 +34,7 @@ extension ClusterShellState { configureSettings(&settings) let log = Logger(label: "handshake-\(side)") // TODO: could be a mock logger we can assert on? 
- let node: UniqueNode = .init(systemName: "Test", host: "127.0.0.1", port: 7337, nid: .random()) + let node = Cluster.Node(systemName: "Test", host: "127.0.0.1", port: 7337, nid: .random()) return ClusterShellState( settings: settings, channel: EmbeddedChannel(), diff --git a/Tests/DistributedClusterTests/ClusterSystem+Testing.swift b/Tests/DistributedClusterTests/ClusterSystem+Testing.swift index 562f19746..f7d54a33c 100644 --- a/Tests/DistributedClusterTests/ClusterSystem+Testing.swift +++ b/Tests/DistributedClusterTests/ClusterSystem+Testing.swift @@ -25,7 +25,7 @@ extension ClusterSystem { func _resolve<Message: Codable>(ref: _ActorRef<Message>, onSystem remoteSystem: ClusterSystem) -> _ActorRef<Message> { assertBacktrace(ref.id._isLocal, "Expecting passed in `ref` to not have an address defined (yet), as this is what we are going to do in this function.") - let remoteID = ActorID(remote: remoteSystem.settings.uniqueBindNode, path: ref.path, incarnation: ref.id.incarnation) + let remoteID = ActorID(remote: remoteSystem.settings.bindNode, path: ref.path, incarnation: ref.id.incarnation) let resolveContext = _ResolveContext<Message>(id: remoteID, system: self) return self._resolve(context: resolveContext) @@ -34,10 +34,10 @@ extension ClusterSystem { /// Internal utility to create "known remote ref" on known target system. /// Real applications should never do this, and instead rely on the `Receptionist` to discover references. func _resolveKnownRemote<Message: Codable>(_ ref: _ActorRef<Message>, onRemoteSystem remote: ClusterSystem) -> _ActorRef<Message> { - self._resolveKnownRemote(ref, onRemoteNode: remote.cluster.uniqueNode) + self._resolveKnownRemote(ref, onRemoteNode: remote.cluster.node) } - func _resolveKnownRemote<Message: Codable>(_ ref: _ActorRef<Message>, onRemoteNode remoteNode: UniqueNode) -> _ActorRef<Message> { + func _resolveKnownRemote<Message: Codable>(_ ref: _ActorRef<Message>, onRemoteNode remoteNode: Cluster.Node) -> _ActorRef<Message> { guard let shell = self._cluster else { fatalError("Actor System must have clustering enabled to allow resolving remote actors") } diff --git a/Tests/DistributedClusterTests/ClusterSystemTests.swift b/Tests/DistributedClusterTests/ClusterSystemTests.swift index d11bd4de3..2902182dc 100644 --- a/Tests/DistributedClusterTests/ClusterSystemTests.swift +++ b/Tests/DistributedClusterTests/ClusterSystemTests.swift @@ -131,7 +131,7 @@ final class ClusterSystemTests: SingleClusterSystemXCTestCase { func test_resolveUnknownActor_shouldReturnPersonalDeadLetters() throws { let path = try ActorPath._user.appending("test").appending("foo").appending("bar") - let id = ActorID(local: self.system.cluster.uniqueNode, path: path, incarnation: .random()) + let id = ActorID(local: self.system.cluster.node, path: path, incarnation: .random()) let context: _ResolveContext = _ResolveContext(id: id, system: self.system) let ref = self.system._resolve(context: context) @@ -147,16 +147,16 @@ final class ClusterSystemTests: SingleClusterSystemXCTestCase { let remote = await setUpNode("remote") { settings in settings.enabled = true } - local.cluster.join(node: remote.cluster.uniqueNode) + local.cluster.join(endpoint: remote.cluster.endpoint) - let remoteAssociationControlState0 = local._cluster!.getEnsureAssociation(with: remote.cluster.uniqueNode) + let remoteAssociationControlState0 = local._cluster!.getEnsureAssociation(with: remote.cluster.node) guard case ClusterShell.StoredAssociationState.association(let remoteControl0) = remoteAssociationControlState0 else { - throw Boom("Expected the association to exist for \(remote.cluster.uniqueNode)") + throw Boom("Expected the association to exist for 
\(remote.cluster.node)") } ClusterShell.shootTheOtherNodeAndCloseConnection(system: local, targetNodeAssociation: remoteControl0) - // Node should eventually appear in tombstones + // Endpoint should eventually appear in tombstones try self.testKit(local).eventually(within: .seconds(3)) { guard local._cluster?._testingOnly_associationTombstones.isEmpty == false else { throw Boom("Expected tombstone for downed node") diff --git a/Tests/DistributedClusterTests/DeadLetterTests.swift b/Tests/DistributedClusterTests/DeadLetterTests.swift index 613a3d68f..f07f0826f 100644 --- a/Tests/DistributedClusterTests/DeadLetterTests.swift +++ b/Tests/DistributedClusterTests/DeadLetterTests.swift @@ -25,7 +25,7 @@ final class DeadLetterTests: SingleClusterSystemXCTestCase { func test_deadLetters_logWithSourcePosition() throws { let log = self.logCapture.logger(label: "/dead/letters") - let id = try ActorID(local: self.system.cluster.uniqueNode, path: ActorPath._user.appending("someone"), incarnation: .random()) + let id = try ActorID(local: self.system.cluster.node, path: ActorPath._user.appending("someone"), incarnation: .random()) let office = DeadLetterOffice(log, id: id, system: system) office.deliver("Hello") @@ -89,7 +89,7 @@ final class DeadLetterTests: SingleClusterSystemXCTestCase { let remote = await setUpNode("remote") { settings in settings.enabled = true } - local.cluster.join(node: remote.cluster.uniqueNode) + local.cluster.join(endpoint: remote.cluster.endpoint) var greeter: Greeter? = Greeter(actorSystem: local) let greeterID = greeter!.id diff --git a/Tests/DistributedClusterTests/DistributedReceptionistTests.swift b/Tests/DistributedClusterTests/DistributedReceptionistTests.swift index 67e80ac15..472fe2eb6 100644 --- a/Tests/DistributedClusterTests/DistributedReceptionistTests.swift +++ b/Tests/DistributedClusterTests/DistributedReceptionistTests.swift @@ -153,8 +153,8 @@ final class DistributedReceptionistTests: SingleClusterSystemXCTestCase { let ref = Forwarder(probe: nil, name: "D", actorSystem: first) await first.receptionist.checkIn(ref, with: .forwarders) - first.cluster.join(node: second.cluster.uniqueNode.node) - try await first.cluster.joined(node: second.cluster.uniqueNode, within: .seconds(30)) + first.cluster.join(endpoint: second.cluster.node.endpoint) + try await first.cluster.joined(node: second.cluster.node, within: .seconds(30)) try await testKit.eventually(within: .seconds(5)) { let lookup = await second.receptionist.lookup(.forwarders) @@ -173,8 +173,8 @@ final class DistributedReceptionistTests: SingleClusterSystemXCTestCase { let ref = Forwarder(probe: nil, name: "D", actorSystem: first) await first.receptionist.checkIn(ref, with: .forwarders) - first.cluster.join(node: second.cluster.uniqueNode.node) - try await first.cluster.joined(node: second.cluster.uniqueNode, within: .seconds(30)) + first.cluster.join(endpoint: second.cluster.node.endpoint) + try await first.cluster.joined(node: second.cluster.node, within: .seconds(30)) try await testKit.eventually(within: .seconds(5)) { let lookup = await second.receptionist.lookup(.forwarders) diff --git a/Tests/DistributedClusterTests/EndpointTests.swift b/Tests/DistributedClusterTests/EndpointTests.swift new file mode 100644 index 000000000..2c785cb6d --- /dev/null +++ b/Tests/DistributedClusterTests/EndpointTests.swift @@ -0,0 +1,57 @@ +//===----------------------------------------------------------------------===// +// +// This source file is part of the Swift Distributed Actors open source project +// +// Copyright (c) 
2018-2019 Apple Inc. and the Swift Distributed Actors project authors +// Licensed under Apache License v2.0 +// +// See LICENSE.txt for license information +// See CONTRIBUTORS.md for the list of Swift Distributed Actors project authors +// +// SPDX-License-Identifier: Apache-2.0 +// +//===----------------------------------------------------------------------===// + +import DistributedActorsTestKit +@testable import DistributedCluster +import Foundation +import XCTest + +final class EndpointTests: XCTestCase { + // ==== ------------------------------------------------------------------------------------------------------------ + // MARK: Endpoint + + func test_nodes_equal_whenHostPortMatch() { + let alpha = Cluster.Endpoint(systemName: "SystemNameAlpha", host: "111.111.11.1", port: 1111) + let beta = Cluster.Endpoint(systemName: "SystemNameBeta", host: "111.111.11.1", port: 1111) + + // system names are only for human readability / debugging, not equality + alpha.shouldEqual(beta) + } + + // ==== ------------------------------------------------------------------------------------------------------------ + // MARK: Cluster.Node + + func test_node_shouldRenderProperly() { + let endpoint = Cluster.Endpoint(systemName: "SystemName", host: "188.121.122.3", port: 1111) + let node = Cluster.Node(endpoint: endpoint, nid: Cluster.Node.ID(2222)) + + "\(node)".shouldEqual("sact://SystemName@188.121.122.3:1111") + "\(String(reflecting: node))".shouldEqual("sact://SystemName:2222@188.121.122.3:1111") + } + + func test_node_comparison_equal() { + let two = Cluster.Node(endpoint: Cluster.Endpoint(systemName: "SystemName", host: "188.121.122.3", port: 1111), nid: Cluster.Node.ID(2222)) + let anotherTwo = two + + two.shouldEqual(anotherTwo) + two.shouldBeLessThanOrEqual(anotherTwo) + } + + func test_node_comparison_lessThan() { + let two = Cluster.Node(endpoint: Cluster.Endpoint(systemName: "SystemName", host: "188.121.122.3", port: 1111), nid: Cluster.Node.ID(2222)) + let three = Cluster.Node(endpoint: Cluster.Endpoint(systemName: "SystemName", host: "188.121.122.3", port: 1111), nid: Cluster.Node.ID(3333)) + + two.shouldBeLessThan(three) + } +} diff --git a/Tests/DistributedClusterTests/InterceptorTests.swift b/Tests/DistributedClusterTests/InterceptorTests.swift index ed96ceccd..92adc08e8 100644 --- a/Tests/DistributedClusterTests/InterceptorTests.swift +++ b/Tests/DistributedClusterTests/InterceptorTests.swift @@ -64,7 +64,7 @@ final class InterceptorTests: SingleClusterSystemXCTestCase { let remote = await setUpNode("remote") { settings in settings.enabled = true } - local.cluster.join(node: remote.cluster.uniqueNode) + local.cluster.join(endpoint: remote.cluster.endpoint) let otherGreeter = Greeter(actorSystem: local, greeting: "HI!!!") let localGreeter: Greeter = try system.interceptCalls( @@ -86,7 +86,7 @@ final class InterceptorTests: SingleClusterSystemXCTestCase { let remote = await setUpNode("remote") { settings in settings.enabled = true } - local.cluster.join(node: remote.cluster.uniqueNode) + local.cluster.join(endpoint: remote.cluster.endpoint) let otherGreeter = Greeter(actorSystem: local, greeting: "HI!!!") let localGreeter: Greeter = try shouldNotThrow { diff --git a/Tests/DistributedClusterTests/LifecycleWatchTests.swift b/Tests/DistributedClusterTests/LifecycleWatchTests.swift index 7130ac4a2..090bc9cb6 100644 --- a/Tests/DistributedClusterTests/LifecycleWatchTests.swift +++ b/Tests/DistributedClusterTests/LifecycleWatchTests.swift @@ -150,7 +150,7 @@ final class LifecycleWatchTests: 
SingleClusterSystemXCTestCase, @unchecked Senda try await juliet.meetWatch(remoteRomeo, unwatch: false) - first.cluster.down(node: second.cluster.uniqueNode.node) + first.cluster.down(endpoint: second.cluster.node.endpoint) try pj.expectMessage("Juliet init") try pr.expectMessage("Romeo init") diff --git a/Tests/DistributedClusterTests/MembershipTests.swift b/Tests/DistributedClusterTests/MembershipTests.swift index a68a14844..6504b8d86 100644 --- a/Tests/DistributedClusterTests/MembershipTests.swift +++ b/Tests/DistributedClusterTests/MembershipTests.swift @@ -17,17 +17,17 @@ import DistributedActorsTestKit import XCTest final class MembershipTests: XCTestCase { - let memberA = Cluster.Member(node: UniqueNode(node: Node(systemName: "nodeA", host: "1.1.1.1", port: 1111), nid: .random()), status: .up) - var nodeA: UniqueNode { self.memberA.uniqueNode } + let memberA = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "nodeA", host: "1.1.1.1", port: 1111), nid: .random()), status: .up) + var nodeA: Cluster.Node { self.memberA.node } - let memberB = Cluster.Member(node: UniqueNode(node: Node(systemName: "nodeB", host: "2.2.2.2", port: 2222), nid: .random()), status: .up) - var nodeB: UniqueNode { self.memberB.uniqueNode } + let memberB = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "nodeB", host: "2.2.2.2", port: 2222), nid: .random()), status: .up) + var nodeB: Cluster.Node { self.memberB.node } - let memberC = Cluster.Member(node: UniqueNode(node: Node(systemName: "nodeC", host: "3.3.3.3", port: 3333), nid: .random()), status: .up) - var nodeC: UniqueNode { self.memberC.uniqueNode } + let memberC = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "nodeC", host: "3.3.3.3", port: 3333), nid: .random()), status: .up) + var nodeC: Cluster.Node { self.memberC.node } - let memberD = Cluster.Member(node: UniqueNode(node: Node(systemName: "nodeD", host: "4.4.4.4", port: 4444), nid: .random()), status: .up) - var nodeD: UniqueNode { self.memberD.uniqueNode } + let memberD = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "nodeD", host: "4.4.4.4", port: 4444), nid: .random()), status: .up) + var nodeD: Cluster.Node { self.memberD.node } lazy var allNodes = [ nodeA, nodeB, nodeC, @@ -66,10 +66,10 @@ final class MembershipTests: XCTestCase { func test_age_ordering() { let ms = [ - Cluster.Member(node: self.memberA.uniqueNode, status: .joining), - Cluster.Member(node: self.memberA.uniqueNode, status: .up, upNumber: 1), - Cluster.Member(node: self.memberA.uniqueNode, status: .down, upNumber: 4), - Cluster.Member(node: self.memberA.uniqueNode, status: .up, upNumber: 2), + Cluster.Member(node: self.memberA.node, status: .joining), + Cluster.Member(node: self.memberA.node, status: .up, upNumber: 1), + Cluster.Member(node: self.memberA.node, status: .down, upNumber: 4), + Cluster.Member(node: self.memberA.node, status: .up, upNumber: 2), ] let ns = ms.sorted(by: Cluster.Member.ageOrdering).map(\._upNumber) ns.shouldEqual([nil, 1, 2, 4]) @@ -83,14 +83,14 @@ final class MembershipTests: XCTestCase { func test_membership_equality() { let left: Cluster.Membership = [ - Cluster.Member(node: self.memberA.uniqueNode, status: .up, upNumber: 1), - Cluster.Member(node: self.memberB.uniqueNode, status: .up, upNumber: 1), - Cluster.Member(node: self.memberC.uniqueNode, status: .up, upNumber: 1), + Cluster.Member(node: self.memberA.node, status: .up, upNumber: 1), + Cluster.Member(node: self.memberB.node, status: .up, 
upNumber: 1), + Cluster.Member(node: self.memberC.node, status: .up, upNumber: 1), ] let right: Cluster.Membership = [ - Cluster.Member(node: self.memberA.uniqueNode, status: .up, upNumber: 1), - Cluster.Member(node: self.memberB.uniqueNode, status: .down, upNumber: 1), - Cluster.Member(node: self.memberC.uniqueNode, status: .up, upNumber: 1), + Cluster.Member(node: self.memberA.node, status: .up, upNumber: 1), + Cluster.Member(node: self.memberB.node, status: .down, upNumber: 1), + Cluster.Member(node: self.memberC.node, status: .up, upNumber: 1), ] left.shouldNotEqual(right) @@ -107,13 +107,13 @@ final class MembershipTests: XCTestCase { self.memberA.shouldNotEqual(self.memberB) // only the node id is different: - let one = Cluster.Member(node: UniqueNode(node: Node(systemName: "firstA", host: "1.1.1.1", port: 1111), nid: .init(1)), status: .up) - let two = Cluster.Member(node: UniqueNode(node: Node(systemName: "firstA", host: "1.1.1.1", port: 1111), nid: .init(12222)), status: .up) + let one = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "firstA", host: "1.1.1.1", port: 1111), nid: .init(1)), status: .up) + let two = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "firstA", host: "1.1.1.1", port: 1111), nid: .init(12222)), status: .up) one.shouldNotEqual(two) // node names do not matter for equality: - let three = Cluster.Member(node: UniqueNode(node: Node(systemName: "does", host: "1.1.1.1", port: 1111), nid: .init(1)), status: .up) - let four = Cluster.Member(node: UniqueNode(node: Node(systemName: "not matter", host: "1.1.1.1", port: 1111), nid: .init(12222)), status: .up) + let three = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "does", host: "1.1.1.1", port: 1111), nid: .init(1)), status: .up) + let four = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "not matter", host: "1.1.1.1", port: 1111), nid: .init(12222)), status: .up) three.shouldNotEqual(four) } @@ -123,7 +123,7 @@ final class MembershipTests: XCTestCase { func test_member_replacement_shouldOfferChange() { var membership: Cluster.Membership = [self.memberA, self.memberB] let secondReplacement = Cluster.Member( - node: UniqueNode(node: Node(systemName: self.nodeB.node.systemName, host: self.nodeB.node.host, port: self.nodeB.node.port), nid: .random()), status: .up + node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: self.nodeB.systemName, host: self.nodeB.host, port: self.nodeB.port), nid: .random()), status: .up ) let change = membership.applyMembershipChange(Cluster.MembershipChange(member: secondReplacement))! @@ -135,7 +135,7 @@ final class MembershipTests: XCTestCase { membership.members(atLeast: .joining).count.shouldEqual(3) membership.members(atLeast: .down).count.shouldEqual(1) - let memberNode = membership.uniqueMember(change.member.uniqueNode) + let memberNode = membership.uniqueMember(change.member.node) memberNode?.status.shouldEqual(Cluster.MemberStatus.up) } @@ -170,7 +170,7 @@ final class MembershipTests: XCTestCase { func test_join_memberReplacement() { var membership = self.initialMembership - let replacesFirstNode = UniqueNode(node: self.nodeA.node, nid: .random()) + let replacesFirstNode = Cluster.Node(endpoint: self.nodeA.endpoint, nid: .random()) let change = membership.join(replacesFirstNode)! 
@@ -184,7 +184,7 @@ final class MembershipTests: XCTestCase { func test_apply_memberReplacement_withUpNode() throws { var membership = self.initialMembership - let firstReplacement = Cluster.Member(node: UniqueNode(node: self.nodeA.node, nid: .init(111_111)), status: .up) + let firstReplacement = Cluster.Member(node: Cluster.Node(endpoint: self.nodeA.endpoint, nid: .init(111_111)), status: .up) let changeToApply = Cluster.MembershipChange(member: firstReplacement) guard let change = membership.applyMembershipChange(changeToApply) else { @@ -194,13 +194,13 @@ final class MembershipTests: XCTestCase { change.isReplacement.shouldBeTrue() change.replaced.shouldEqual(self.memberA) change.replaced!.status.shouldEqual(self.memberA.status) - change.node.shouldEqual(firstReplacement.uniqueNode) + change.node.shouldEqual(firstReplacement.node) change.status.shouldEqual(firstReplacement.status) } func test_apply_withNodeNotPartOfClusterAnymore_leaving() throws { var membership = self.initialMembership - _ = membership.removeCompletely(self.memberC.uniqueNode) + _ = membership.removeCompletely(self.memberC.node) let changeToApply = Cluster.MembershipChange(member: self.memberC, toStatus: .leaving) if let change = membership.applyMembershipChange(changeToApply) { @@ -210,7 +210,7 @@ final class MembershipTests: XCTestCase { func test_apply_withNodeNotPartOfClusterAnymore_down() throws { var membership = self.initialMembership - _ = membership.removeCompletely(self.memberC.uniqueNode) + _ = membership.removeCompletely(self.memberC.node) let changeToApply = Cluster.MembershipChange(member: self.memberC, toStatus: .down) if let change = membership.applyMembershipChange(changeToApply) { @@ -221,17 +221,17 @@ final class MembershipTests: XCTestCase { func test_apply_memberRemoval() throws { var membership = self.initialMembership - let removal = Cluster.Member(node: self.memberA.uniqueNode, status: .removed) + let removal = Cluster.Member(node: self.memberA.node, status: .removed) guard let change = membership.applyMembershipChange(Cluster.MembershipChange(member: removal)) else { throw TestError("Expected a change, but didn't get one") } change.isReplacement.shouldBeFalse() - change.node.shouldEqual(removal.uniqueNode) + change.node.shouldEqual(removal.node) change.status.shouldEqual(removal.status) - membership.uniqueMember(self.memberA.uniqueNode).shouldBeNil() + membership.uniqueMember(self.memberA.node).shouldBeNil() } // ==== ------------------------------------------------------------------------------------------------------------ @@ -241,7 +241,7 @@ final class MembershipTests: XCTestCase { self.initialMembership.members(atLeast: .joining).count.shouldEqual(3) self.initialMembership.members(atLeast: .up).count.shouldEqual(3) var changed = self.initialMembership - _ = changed.mark(self.memberA.uniqueNode, as: .down) + _ = changed.mark(self.memberA.node, as: .down) changed.count(atLeast: .joining).shouldEqual(3) changed.count(atLeast: .up).shouldEqual(3) changed.count(atLeast: .leaving).shouldEqual(1) @@ -251,10 +251,10 @@ final class MembershipTests: XCTestCase { func test_members_listing_filteringByReachability() { var changed = self.initialMembership - _ = changed.mark(self.memberA.uniqueNode, as: .down) + _ = changed.mark(self.memberA.node, as: .down) - _ = changed.mark(self.memberA.uniqueNode, reachability: .unreachable) - _ = changed.mark(self.memberB.uniqueNode, reachability: .unreachable) + _ = changed.mark(self.memberA.node, reachability: .unreachable) + _ = changed.mark(self.memberB.node, 
reachability: .unreachable) // exact status match @@ -295,16 +295,16 @@ final class MembershipTests: XCTestCase { // MARK: Marking func test_mark_shouldOnlyProceedForwardInStatuses() { - let member = Cluster.Member(node: UniqueNode(node: Node(systemName: "System", host: "4.4.4.4", port: 1001), nid: .random()), status: .joining) + let member = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "System", host: "4.4.4.4", port: 1001), nid: .random()), status: .joining) var membership: Cluster.Membership = [member] // marking no-member -> no-op - let noChange = membership.mark(member.uniqueNode, as: .joining) + let noChange = membership.mark(member.node, as: .joining) noChange.shouldBeNil() // already joining - let change1 = membership.mark(member.uniqueNode, as: .up) + let change1 = membership.mark(member.node, as: .up) change1.shouldNotBeNil() // testing string output as well as field on purpose @@ -313,38 +313,38 @@ final class MembershipTests: XCTestCase { change1?.status.shouldEqual(.up) "\(change1!)".shouldContain("1001 :: [joining] -> [ up]") - membership.mark(member.uniqueNode, as: .joining).shouldBeNil() // can't move "back" - membership.mark(member.uniqueNode, as: .up).shouldBeNil() // don't move to "same" + membership.mark(member.node, as: .joining).shouldBeNil() // can't move "back" + membership.mark(member.node, as: .up).shouldBeNil() // don't move to "same" - let change2 = membership.mark(member.uniqueNode, as: .down) + let change2 = membership.mark(member.node, as: .down) change2.shouldNotBeNil() change2?.previousStatus.shouldEqual(.up) change2?.status.shouldEqual(.down) "\(change2!)".shouldContain("1001 :: [ up] -> [ down]") - membership.mark(member.uniqueNode, as: .joining).shouldBeNil() // can't move "back" - membership.mark(member.uniqueNode, as: .up).shouldBeNil() // can't move "back", from down + membership.mark(member.node, as: .joining).shouldBeNil() // can't move "back" + membership.mark(member.node, as: .up).shouldBeNil() // can't move "back", from down } func test_mark_shouldNotReturnChangeForMarkingAsSameStatus() { let member = self.memberA var membership: Cluster.Membership = [member] - let noChange = membership.mark(member.uniqueNode, as: member.status) + let noChange = membership.mark(member.node, as: member.status) noChange.shouldBeNil() } func test_mark_reachability() { - let member = Cluster.Member(node: UniqueNode(node: Node(systemName: "System", host: "4.4.4.4", port: 1001), nid: .random()), status: .joining) + let member = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "System", host: "4.4.4.4", port: 1001), nid: .random()), status: .joining) var membership: Cluster.Membership = [member] - membership.mark(member.uniqueNode, reachability: .reachable).shouldEqual(nil) // no change + membership.mark(member.node, reachability: .reachable).shouldEqual(nil) // no change - let res1 = membership.mark(member.uniqueNode, reachability: .unreachable) + let res1 = membership.mark(member.node, reachability: .unreachable) res1!.reachability.shouldEqual(.unreachable) - membership.mark(member.uniqueNode, reachability: .unreachable).shouldEqual(nil) // no change - _ = membership.mark(member.uniqueNode, reachability: .unreachable) + membership.mark(member.node, reachability: .unreachable).shouldEqual(nil) // no change + _ = membership.mark(member.node, reachability: .unreachable) } // ==== ---------------------------------------------------------------------------------------------------------------- @@ -352,8 +352,8 @@ final 
class MembershipTests: XCTestCase { func test_join_overAnExistingNode_replacement() { var membership = self.initialMembership - let secondReplacement = Cluster.Member(node: UniqueNode(node: self.nodeB.node, nid: .random()), status: .joining) - let change = membership.join(secondReplacement.uniqueNode)! + let secondReplacement = Cluster.Member(node: Cluster.Node(endpoint: self.nodeB.endpoint, nid: .random()), status: .joining) + let change = membership.join(secondReplacement.node)! change.isReplacement.shouldBeTrue() let members = membership.members(atLeast: .joining) @@ -368,27 +368,27 @@ final class MembershipTests: XCTestCase { func test_mark_replacement() throws { var membership: Cluster.Membership = [self.memberA] - let firstReplacement = Cluster.Member(node: UniqueNode(node: self.nodeA.node, nid: .random()), status: .up) + let firstReplacement = Cluster.Member(node: Cluster.Node(endpoint: self.nodeA.endpoint, nid: .random()), status: .up) - guard let change = membership.mark(firstReplacement.uniqueNode, as: firstReplacement.status) else { + guard let change = membership.mark(firstReplacement.node, as: firstReplacement.status) else { throw TestError("Expected a change") } change.isReplacement.shouldBeTrue() change.replaced.shouldEqual(self.memberA) change.previousStatus.shouldEqual(.up) - change.node.shouldEqual(firstReplacement.uniqueNode) + change.node.shouldEqual(firstReplacement.node) change.status.shouldEqual(.up) } func test_mark_status_whenReplacingWithNewNode() { - let one = Cluster.Member(node: UniqueNode(node: Node(systemName: "System", host: "1.1.1.1", port: 1001), nid: .random()), status: .joining) - var two = Cluster.Member(node: UniqueNode(node: Node(systemName: "System", host: "2.2.2.2", port: 2222), nid: .random()), status: .up) - let twoReplacement = Cluster.Member(node: UniqueNode(node: Node(systemName: "System", host: "2.2.2.2", port: 2222), nid: .random()), status: .joining) + let one = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "System", host: "1.1.1.1", port: 1001), nid: .random()), status: .joining) + var two = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "System", host: "2.2.2.2", port: 2222), nid: .random()), status: .up) + let twoReplacement = Cluster.Member(node: Cluster.Node(endpoint: Cluster.Endpoint(systemName: "System", host: "2.2.2.2", port: 2222), nid: .random()), status: .joining) var membership: Cluster.Membership = [one, two] - let changed = membership.mark(twoReplacement.uniqueNode, as: .joining)! - changed.member.uniqueNode.shouldEqual(twoReplacement.uniqueNode) + let changed = membership.mark(twoReplacement.node, as: .joining)! 
+ changed.member.node.shouldEqual(twoReplacement.node) changed.status.isJoining.shouldBeTrue() two.status = .down @@ -399,13 +399,13 @@ final class MembershipTests: XCTestCase { var existing = self.memberA existing.status = .joining - let replacement = Cluster.Member(node: UniqueNode(node: existing.uniqueNode.node, nid: .random()), status: .up) + let replacement = Cluster.Member(node: Cluster.Node(endpoint: existing.node.endpoint, nid: .random()), status: .up) let change = Cluster.MembershipChange(replaced: existing, by: replacement) change.isReplacement.shouldBeTrue() change.member.shouldEqual(replacement) - change.node.shouldEqual(replacement.uniqueNode) + change.node.shouldEqual(replacement.node) change.previousStatus.shouldEqual(existing.status) change.replaced!.status.shouldEqual(existing.status) // though we have the replaced member, it will have its own previous status @@ -489,37 +489,37 @@ final class MembershipTests: XCTestCase { } func test_membershipDiff_shouldIncludeEntry_whenStatusChangedForIt() { - let changed = self.initialMembership.marking(self.memberA.uniqueNode, as: .leaving) + let changed = self.initialMembership.marking(self.memberA.node, as: .leaving) let diff = Cluster.Membership._diff(from: self.initialMembership, to: changed) diff.changes.count.shouldEqual(1) let diffEntry = diff.changes.first! - diffEntry.node.shouldEqual(self.memberA.uniqueNode) + diffEntry.node.shouldEqual(self.memberA.node) diffEntry.previousStatus?.shouldEqual(.up) diffEntry.status.shouldEqual(.leaving) } func test_membershipDiff_shouldIncludeEntry_whenMemberRemoved() { - let changed = self.initialMembership.removingCompletely(self.memberA.uniqueNode) + let changed = self.initialMembership.removingCompletely(self.memberA.node) let diff = Cluster.Membership._diff(from: self.initialMembership, to: changed) diff.changes.count.shouldEqual(1) let diffEntry = diff.changes.first! - diffEntry.node.shouldEqual(self.memberA.uniqueNode) + diffEntry.node.shouldEqual(self.memberA.node) diffEntry.previousStatus?.shouldEqual(.up) diffEntry.status.shouldEqual(.removed) } func test_membershipDiff_shouldIncludeEntry_whenMemberAdded() { - let changed = self.initialMembership.joining(self.memberD.uniqueNode) + let changed = self.initialMembership.joining(self.memberD.node) let diff = Cluster.Membership._diff(from: self.initialMembership, to: changed) diff.changes.count.shouldEqual(1) let diffEntry = diff.changes.first! - diffEntry.node.shouldEqual(self.memberD.uniqueNode) + diffEntry.node.shouldEqual(self.memberD.node) diffEntry.previousStatus.shouldBeNil() diffEntry.status.shouldEqual(.joining) } @@ -540,12 +540,12 @@ final class MembershipTests: XCTestCase { func test_mergeForward_fromAhead_membership_withAdditionalMember() { var membership = self.initialMembership var ahead = membership - _ = ahead.join(self.memberD.uniqueNode)! + _ = ahead.join(self.memberD.node)! 
let changes = membership.mergeFrom(incoming: ahead, myself: nil) changes.count.shouldEqual(1) - membership.shouldEqual(self.initialMembership.joining(self.memberD.uniqueNode)) + membership.shouldEqual(self.initialMembership.joining(self.memberD.node)) } func test_mergeForward_fromAhead_membership_withMemberNowDown() { diff --git a/Tests/DistributedClusterTests/Metrics/SWIMActorPeerMetricsTests.swift b/Tests/DistributedClusterTests/Metrics/SWIMActorPeerMetricsTests.swift index ec6d45605..cba88f11a 100644 --- a/Tests/DistributedClusterTests/Metrics/SWIMActorPeerMetricsTests.swift +++ b/Tests/DistributedClusterTests/Metrics/SWIMActorPeerMetricsTests.swift @@ -45,8 +45,8 @@ final class ActorMetricsSWIMActorPeerMetricsTests: ClusteredActorSystemsXCTestCa } let targetNode = await setUpNode("target") - originNode.cluster.join(node: targetNode.cluster.uniqueNode) - try assertAssociated(originNode, withExactly: targetNode.cluster.uniqueNode) + originNode.cluster.join(endpoint: targetNode.cluster.endpoint) + try assertAssociated(originNode, withExactly: targetNode.cluster.node) guard let origin = originNode._cluster?._swimShell else { throw testKit(originNode).fail("SWIM shell of [\(originNode)] should not be nil") @@ -93,9 +93,9 @@ final class ActorMetricsSWIMActorPeerMetricsTests: ClusteredActorSystemsXCTestCa let targetNode = await setUpNode("target") let throughNode = await setUpNode("through") - originNode.cluster.join(node: throughNode.cluster.uniqueNode) - targetNode.cluster.join(node: throughNode.cluster.uniqueNode) - try assertAssociated(originNode, withExactly: [targetNode.cluster.uniqueNode, throughNode.cluster.uniqueNode]) + originNode.cluster.join(endpoint: throughNode.cluster.endpoint) + targetNode.cluster.join(endpoint: throughNode.cluster.endpoint) + try assertAssociated(originNode, withExactly: [targetNode.cluster.node, throughNode.cluster.node]) guard let origin = originNode._cluster?._swimShell else { throw testKit(originNode).fail("SWIM shell of [\(originNode)] should not be nil") diff --git a/Tests/DistributedClusterTests/NodeDeathWatcherTests.swift b/Tests/DistributedClusterTests/NodeDeathWatcherTests.swift index c3fbad4de..a248e6f8d 100644 --- a/Tests/DistributedClusterTests/NodeDeathWatcherTests.swift +++ b/Tests/DistributedClusterTests/NodeDeathWatcherTests.swift @@ -56,8 +56,8 @@ final class NodeDeathWatcherTests: ClusteredActorSystemsXCTestCase { } ) - try await self.ensureNodes(.up, nodes: first.cluster.uniqueNode, second.cluster.uniqueNode) - first.cluster.down(node: second.cluster.uniqueNode.node) + try await self.ensureNodes(.up, nodes: first.cluster.node, second.cluster.node) + first.cluster.down(endpoint: second.cluster.node.endpoint) // should cause termination of all remote actors, observed by the local actors on [first] let termination1: _Signals.Terminated = try p.expectMessage() @@ -71,7 +71,7 @@ final class NodeDeathWatcherTests: ClusteredActorSystemsXCTestCase { }) // should not trigger terminated again for any of the remote refs - first.cluster.down(node: second.cluster.uniqueNode.node) + first.cluster.down(endpoint: second.cluster.node.endpoint) try p.expectNoMessage(for: .milliseconds(50)) } } diff --git a/Tests/DistributedClusterTests/NodeTests.swift b/Tests/DistributedClusterTests/NodeTests.swift deleted file mode 100644 index ad8140eb6..000000000 --- a/Tests/DistributedClusterTests/NodeTests.swift +++ /dev/null @@ -1,57 +0,0 @@ -//===----------------------------------------------------------------------===// -// -// This source file is part of the Swift 
Distributed Actors open source project -// -// Copyright (c) 2018-2019 Apple Inc. and the Swift Distributed Actors project authors -// Licensed under Apache License v2.0 -// -// See LICENSE.txt for license information -// See CONTRIBUTORS.md for the list of Swift Distributed Actors project authors -// -// SPDX-License-Identifier: Apache-2.0 -// -//===----------------------------------------------------------------------===// - -import DistributedActorsTestKit -@testable import DistributedCluster -import Foundation -import XCTest - -final class NodeTests: XCTestCase { - // ==== ---------------------------------------------------------------------------------------------------------------- - // MARK: Node - - func test_nodes_equal_whenHostPortMatch() { - let alpha = Node(systemName: "SystemNameAlpha", host: "111.111.11.1", port: 1111) - let beta = Node(systemName: "SystemNameBeta", host: "111.111.11.1", port: 1111) - - // system names are only for human readability / debugging, not equality - alpha.shouldEqual(beta) - } - - // ==== ---------------------------------------------------------------------------------------------------------------- - // MARK: UniqueNode - - func test_uniqueNode_shouldRenderProperly() { - let node = Node(systemName: "SystemName", host: "188.121.122.3", port: 1111) - let uniqueNode = UniqueNode(node: node, nid: UniqueNodeID(2222)) - - "\(uniqueNode)".shouldEqual("sact://SystemName@188.121.122.3:1111") - "\(String(reflecting: uniqueNode))".shouldEqual("sact://SystemName:2222@188.121.122.3:1111") - } - - func test_uniqueNode_comparison_equal() { - let two = UniqueNode(node: Node(systemName: "SystemName", host: "188.121.122.3", port: 1111), nid: UniqueNodeID(2222)) - let anotherTwo = two - - two.shouldEqual(anotherTwo) - two.shouldBeLessThanOrEqual(anotherTwo) - } - - func test_uniqueNode_comparison_lessThan() { - let two = UniqueNode(node: Node(systemName: "SystemName", host: "188.121.122.3", port: 1111), nid: UniqueNodeID(2222)) - let three = UniqueNode(node: Node(systemName: "SystemName", host: "188.121.122.3", port: 1111), nid: UniqueNodeID(3333)) - - two.shouldBeLessThan(three) - } -} diff --git a/Tests/DistributedClusterTests/Plugins/ClusterSingleton/ClusterSingletonPluginClusteredTests.swift b/Tests/DistributedClusterTests/Plugins/ClusterSingleton/ClusterSingletonPluginClusteredTests.swift index ea4b79959..55d10bc5c 100644 --- a/Tests/DistributedClusterTests/Plugins/ClusterSingleton/ClusterSingletonPluginClusteredTests.swift +++ b/Tests/DistributedClusterTests/Plugins/ClusterSingleton/ClusterSingletonPluginClusteredTests.swift @@ -33,17 +33,17 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas singletonSettings.allocationStrategy = .byLeadership let first = await self.setUpNode("first") { settings in - settings.node.port = 7111 + settings.endpoint.port = 7111 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) settings += ClusterSingletonPlugin() } let second = await self.setUpNode("second") { settings in - settings.node.port = 8222 + settings.endpoint.port = 8222 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) settings += ClusterSingletonPlugin() } let third = await self.setUpNode("third") { settings in - settings.node.port = 9333 + settings.endpoint.port = 9333 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) settings += ClusterSingletonPlugin() } @@ -60,11 +60,11 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas 
TheSingleton(greeting: "Hello-3", actorSystem: actorSystem) } - first.cluster.join(node: second.cluster.uniqueNode.node) - third.cluster.join(node: first.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) + third.cluster.join(endpoint: first.cluster.node.endpoint) // `first` will be the leader (lowest address) and runs the singleton - try await self.ensureNodes(.up, on: first, within: .seconds(10), nodes: second.cluster.uniqueNode, third.cluster.uniqueNode) + try await self.ensureNodes(.up, on: first, within: .seconds(10), nodes: second.cluster.node, third.cluster.node) try await self.assertSingletonRequestReply(first, singleton: ref1, greetingName: "Alice", expectedPrefix: "Hello-1 Alice!") try await self.assertSingletonRequestReply(second, singleton: ref2, greetingName: "Bob", expectedPrefix: "Hello-1 Bob!") @@ -76,7 +76,7 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas singletonSettings.allocationStrategy = .byLeadership let first = await self.setUpNode("first") { settings in - settings.node.port = 7111 + settings.endpoint.port = 7111 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 1) // just myself settings.plugins.install(plugin: ClusterSingletonPlugin()) } @@ -110,17 +110,17 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas singletonSettings.allocationTimeout = .seconds(15) let first = await self.setUpNode("first") { settings in - settings.node.port = 7111 + settings.endpoint.port = 7111 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) settings += ClusterSingletonPlugin() } let second = await self.setUpNode("second") { settings in - settings.node.port = 8222 + settings.endpoint.port = 8222 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) settings += ClusterSingletonPlugin() } let third = await self.setUpNode("third") { settings in - settings.node.port = 9333 + settings.endpoint.port = 9333 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) settings += ClusterSingletonPlugin() } @@ -153,8 +153,8 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas try await withThrowingTaskGroup(of: TaskType.self) { group in group.addTask { // Set up the cluster - first.cluster.join(node: second.cluster.uniqueNode.node) - third.cluster.join(node: first.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) + third.cluster.join(endpoint: first.cluster.node.endpoint) // `first` will be the leader (lowest address) and runs the singleton. 
// @@ -187,22 +187,22 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas singletonSettings.allocationTimeout = .seconds(15) let first = await self.setUpNode("first") { settings in - settings.node.port = 7111 + settings.endpoint.port = 7111 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) settings += ClusterSingletonPlugin() } let second = await self.setUpNode("second") { settings in - settings.node.port = 8222 + settings.endpoint.port = 8222 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) settings += ClusterSingletonPlugin() } let third = await self.setUpNode("third") { settings in - settings.node.port = 9333 + settings.endpoint.port = 9333 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) settings += ClusterSingletonPlugin() } let fourth = await self.setUpNode("fourth") { settings in - settings.node.port = 7444 + settings.endpoint.port = 7444 settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 3) settings += ClusterSingletonPlugin() } @@ -222,19 +222,19 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas TheSingleton(greeting: "Hello-4", actorSystem: actorSystem) } - first.cluster.join(node: second.cluster.uniqueNode.node) - third.cluster.join(node: first.cluster.uniqueNode.node) + first.cluster.join(endpoint: second.cluster.node.endpoint) + third.cluster.join(endpoint: first.cluster.node.endpoint) // `first` will be the leader (lowest address) and runs the singleton - try await self.ensureNodes(.up, on: first, within: .seconds(10), nodes: second.cluster.uniqueNode, third.cluster.uniqueNode) - pinfo("Nodes up: \([first.cluster.uniqueNode, second.cluster.uniqueNode, third.cluster.uniqueNode])") + try await self.ensureNodes(.up, on: first, within: .seconds(10), nodes: second.cluster.node, third.cluster.node) + pinfo("Nodes up: \([first.cluster.node, second.cluster.node, third.cluster.node])") try await self.assertSingletonRequestReply(first, singleton: ref1, greetingName: "Alice", expectedPrefix: "Hello-1 Alice!") try await self.assertSingletonRequestReply(second, singleton: ref2, greetingName: "Bob", expectedPrefix: "Hello-1 Bob!") try await self.assertSingletonRequestReply(third, singleton: ref3, greetingName: "Charlie", expectedPrefix: "Hello-1 Charlie!") pinfo("All three nodes communicated with singleton") - let firstNode = first.cluster.uniqueNode + let firstNode = first.cluster.node first.cluster.leave() // Make sure that `second` and `third` see `first` as down and become leader-less @@ -245,11 +245,11 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas try self.assertLeaderNode(on: second, is: nil) try self.assertLeaderNode(on: third, is: nil) } - pinfo("Node \(firstNode) left cluster...") + pinfo("Endpoint \(firstNode) left cluster...") // `fourth` will become the new leader and singleton - pinfo("Node \(fourth.cluster.uniqueNode) joining cluster...") - fourth.cluster.join(node: second.cluster.uniqueNode.node) + pinfo("Endpoint \(fourth.cluster.node) joining cluster...") + fourth.cluster.join(endpoint: second.cluster.node.endpoint) let start = ContinuousClock.Instant.now // No leader so singleton is not available, messages sent should be stashed @@ -278,8 +278,8 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas let ref2Task = requestReplyTask(singleton: ref2, greetingName: "Bob") let ref3Task = requestReplyTask(singleton: ref2, greetingName: "Charlie") - try await 
-                pinfo("Fourth node joined, will become leader; Members now: \([fourth.cluster.uniqueNode, second.cluster.uniqueNode, third.cluster.uniqueNode])")
+                try await self.ensureNodes(.up, on: second, within: .seconds(10), nodes: third.cluster.node, fourth.cluster.node)
+                pinfo("Fourth node joined, will become leader; Members now: \([fourth.cluster.node, second.cluster.node, third.cluster.node])")

                 ref2Task.cancel()
                 ref3Task.cancel()
@@ -322,12 +322,12 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas
         singletonSettings.allocationTimeout = .milliseconds(100)

         let first = await self.setUpNode("first") { settings in
-            settings.node.port = 7111
+            settings.endpoint.port = 7111
             settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 2)
             settings += ClusterSingletonPlugin()
         }
         let second = await self.setUpNode("second") { settings in
-            settings.node.port = 8222
+            settings.endpoint.port = 8222
             settings.autoLeaderElection = .lowestReachable(minNumberOfMembers: 2)
             settings += ClusterSingletonPlugin()
         }
@@ -341,14 +341,14 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas
             TheSingleton(greeting: "Hello-2", actorSystem: actorSystem)
         }

-        first.cluster.join(node: second.cluster.uniqueNode.node)
+        first.cluster.join(endpoint: second.cluster.node.endpoint)

         // `first` will be the leader (lowest address) and runs the singleton
-        try await self.ensureNodes(.up, on: first, nodes: second.cluster.uniqueNode)
+        try await self.ensureNodes(.up, on: first, nodes: second.cluster.node)

         try await self.assertSingletonRequestReply(second, singleton: ref2, greetingName: "Bob", expectedPrefix: "Hello-1 Bob!")

-        let firstNode = first.cluster.uniqueNode
+        let firstNode = first.cluster.node
         first.cluster.leave()

         try await self.assertMemberStatus(on: second, node: firstNode, is: .down, within: .seconds(10))
@@ -385,7 +385,7 @@ final class ClusterSingletonPluginClusteredTests: ClusteredActorSystemsXCTestCas
             } catch {
                 throw TestError(
                     """
-                    Received no reply from singleton [\(singleton)] while sending from [\(system.cluster.uniqueNode.node)], \
+                    Received no reply from singleton [\(singleton)] while sending from [\(system.cluster.node.endpoint)], \
                     perhaps request was lost. Sent greeting [\(greetingName)] and expected prefix: [\(expectedPrefix)] (attempts: \(attempts))
                     """)
             }
@@ -404,7 +404,7 @@ distributed actor TheSingleton: ClusterSingleton {
     }

     distributed func greet(name: String) -> String {
-        "\(self.greeting) \(name)! (from node: \(self.id.uniqueNode), id: \(self.id.detailedDescription))"
+        "\(self.greeting) \(name)! (from node: \(self.id.node), id: \(self.id.detailedDescription))"
     }
 }
diff --git a/Tests/DistributedClusterTests/RemoteCallTests.swift b/Tests/DistributedClusterTests/RemoteCallTests.swift
index 49949e472..1f692a3e8 100644
--- a/Tests/DistributedClusterTests/RemoteCallTests.swift
+++ b/Tests/DistributedClusterTests/RemoteCallTests.swift
@@ -25,7 +25,7 @@ final class RemoteCallTests: ClusteredActorSystemsXCTestCase {
         let remote = await setUpNode("remote") { settings in
             settings.serialization.registerInbound(GreeterCodableError.self)
         }
-        local.cluster.join(node: remote.cluster.uniqueNode)
+        local.cluster.join(endpoint: remote.cluster.endpoint)

         let greeter = Greeter(actorSystem: local)
         let remoteGreeterRef = try Greeter.resolve(id: greeter.id, using: remote)
@@ -43,7 +43,7 @@ final class RemoteCallTests: ClusteredActorSystemsXCTestCase {
         let remote = await setUpNode("remote") { settings in
             settings.serialization.registerInbound(GreeterCodableError.self)
         }
-        local.cluster.join(node: remote.cluster.uniqueNode)
+        local.cluster.join(endpoint: remote.cluster.endpoint)

         let greeter = Greeter(actorSystem: local)
         let remoteGreeterRef = try Greeter.resolve(id: greeter.id, using: remote)
@@ -63,7 +63,7 @@ final class RemoteCallTests: ClusteredActorSystemsXCTestCase {
         let remote = await setUpNode("remote") { settings in
             settings.serialization.registerInbound(GreeterCodableError.self)
         }
-        local.cluster.join(node: remote.cluster.uniqueNode)
+        local.cluster.join(endpoint: remote.cluster.endpoint)

         let greeter = Greeter(actorSystem: local)
         let remoteGreeterRef = try Greeter.resolve(id: greeter.id, using: remote)
@@ -82,7 +82,7 @@ final class RemoteCallTests: ClusteredActorSystemsXCTestCase {
         let remote = await setUpNode("remote") { settings in
             settings.serialization.registerInbound(GreeterCodableError.self)
         }
-        local.cluster.join(node: remote.cluster.uniqueNode)
+        local.cluster.join(endpoint: remote.cluster.endpoint)

         let greeter = Greeter(actorSystem: local)
         let remoteGreeterRef = try Greeter.resolve(id: greeter.id, using: remote)
@@ -155,7 +155,7 @@ final class RemoteCallTests: ClusteredActorSystemsXCTestCase {
     func test_remoteCallVoid_shouldConfigureTimeout() async throws {
         let local = await setUpNode("local")
         let remote = await setUpNode("remote")
-        local.cluster.join(node: remote.cluster.uniqueNode)
+        local.cluster.join(endpoint: remote.cluster.endpoint)

         let greeter = Greeter(actorSystem: local)
         let remoteGreeterRef = try Greeter.resolve(id: greeter.id, using: remote)
@@ -177,7 +177,7 @@ final class RemoteCallTests: ClusteredActorSystemsXCTestCase {
     func test_remoteCallGeneric() async throws {
         let local = await setUpNode("local")
         let remote = await setUpNode("remote")
-        local.cluster.join(node: remote.cluster.uniqueNode)
+        local.cluster.join(endpoint: remote.cluster.endpoint)

         let greeter = Greeter(actorSystem: local)
         let remoteGreeterRef = try Greeter.resolve(id: greeter.id, using: remote)
@@ -200,7 +200,7 @@ final class RemoteCallTests: ClusteredActorSystemsXCTestCase {
             settings.serialization.registerInbound(GreeterCodableError.self)
             settings.serialization.registerInbound(AnotherGreeterCodableError.self)
         }
-        local.cluster.join(node: remote.cluster.uniqueNode)
+        local.cluster.join(endpoint: remote.cluster.endpoint)

         let greeter = Greeter(actorSystem: local)
         let remoteGreeterRef = try Greeter.resolve(id: greeter.id, using: remote)
@@ -224,7 +224,7 @@ final class RemoteCallTests: ClusteredActorSystemsXCTestCase {
             settings.serialization.registerInbound(GreeterCodableError.self)
             settings.serialization.registerInbound(AnotherGreeterCodableError.self)
         }
-        local.cluster.join(node: remote.cluster.uniqueNode)
+        local.cluster.join(endpoint: remote.cluster.endpoint)

         let greeter = Greeter(actorSystem: local)
         let remoteGreeterRef = try Greeter.resolve(id: greeter.id, using: remote)
@@ -249,7 +249,7 @@ final class RemoteCallTests: ClusteredActorSystemsXCTestCase {
             settings.serialization.registerInbound(GreeterCodableError.self)
             settings.serialization.registerInbound(AnotherGreeterCodableError.self)
         }
-        local.cluster.join(node: remote.cluster.uniqueNode)
+        local.cluster.join(endpoint: remote.cluster.endpoint)

         let greeter = Greeter(actorSystem: local)
         let remoteGreeterRef = try Greeter.resolve(id: greeter.id, using: remote)
@@ -269,7 +269,7 @@ final class RemoteCallTests: ClusteredActorSystemsXCTestCase {
         let remote = await setUpNode("remote") { settings in
             settings.remoteCall.codableErrorAllowance = .custom(allowedTypes: [GreeterCodableError.self, AnotherGreeterCodableError.self])
         }
-        local.cluster.join(node: remote.cluster.uniqueNode)
+        local.cluster.join(endpoint: remote.cluster.endpoint)

         let greeter = Greeter(actorSystem: local)
         let remoteGreeterRef = try Greeter.resolve(id: greeter.id, using: remote)
@@ -295,7 +295,7 @@ final class RemoteCallTests: ClusteredActorSystemsXCTestCase {
             settings.serialization.registerInbound(GreeterCodableError.self)
             settings.serialization.registerInbound(AnotherGreeterCodableError.self)
         }
-        local.cluster.join(node: remote.cluster.uniqueNode)
+        local.cluster.join(endpoint: remote.cluster.endpoint)

         let greeter = Greeter(actorSystem: local)
         let remoteGreeterRef = try Greeter.resolve(id: greeter.id, using: remote)
diff --git a/Tests/DistributedClusterTests/SerializationTests.swift b/Tests/DistributedClusterTests/SerializationTests.swift
index 12e90fc26..107ae1e1e 100644
--- a/Tests/DistributedClusterTests/SerializationTests.swift
+++ b/Tests/DistributedClusterTests/SerializationTests.swift
@@ -85,7 +85,7 @@ class SerializationTests: SingleClusterSystemXCTestCase {
     }

     func test_serialize_actorAddress_usingContext() throws {
-        let node = UniqueNode(systemName: "one", host: "127.0.0.1", port: 1234, nid: UniqueNodeID(11111))
+        let node = Cluster.Node(systemName: "one", host: "127.0.0.1", port: 1234, nid: Cluster.Node.ID(11111))
         let id = try ActorPath(root: "user").appending("hello").makeLocalID(on: node, incarnation: .random())

         let encoder = JSONEncoder()
@@ -170,7 +170,7 @@ class SerializationTests: SingleClusterSystemXCTestCase {
         let serializedFormat: String = serialized.buffer.stringDebugDescription()
         pinfo("serialized ref: \(serializedFormat)")
         serializedFormat.shouldContain("sact")
-        serializedFormat.shouldContain("\(remoteCapableSystem.settings.uniqueBindNode.nid)")
+        serializedFormat.shouldContain("\(remoteCapableSystem.settings.bindNode.nid)")
         serializedFormat.shouldContain(remoteCapableSystem.name) // automatically picked up name from system
         serializedFormat.shouldContain("\(remoteCapableSystem.settings.bindHost)")
         serializedFormat.shouldContain("\(remoteCapableSystem.settings.bindPort)")
diff --git a/scripts/generate_protos.sh b/scripts/generate_protos.sh
index 991b36a67..6d7b1132a 100755
--- a/scripts/generate_protos.sh
+++ b/scripts/generate_protos.sh
@@ -68,7 +68,7 @@ done
 popd >> /dev/null

 declare -a internal_proto_paths
-internal_proto_paths=( "$root_path/Sources/DistributedActorsBenchmarks/BenchmarkProtos" "$root_path/Tests/DistributedActorsDocumentationTests/DocumentationProtos" )
+internal_proto_paths=( "$root_path/Tests/DistributedActorsDocumentationTests/DocumentationProtos" )

 for internal_proto_path in "${internal_proto_paths[@]}"; do
     (