FlowRuleStore: Consider errors when updating state of all current backups

Change-Id: I3bf4d20d79dc37c7040648ec6379794b8c93aad2
Committed by: Gerrit Code Review
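The core of this change is on the sender side: the backup of flow tables moves from a fire-and-forget unicast to a request-reply exchange (sendAndReceive), so the sender learns which devices the backup node actually stored and records lastBackupTimes/lastBackupNodes only for those. What follows is a minimal, self-contained sketch of that acknowledgment pattern; the String/Long types and the local sendAndReceive stub are simplified stand-ins for the ONOS ClusterCommunicationService and its serializers, not the real API.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;

public class BackupAckSketch {

    // Stand-in for clusterCommunicator.sendAndReceive(...): the "remote"
    // handler replies with the subset of devices it managed to store.
    static CompletableFuture<Set<String>> sendAndReceive(Map<String, String> flowsByDevice) {
        Set<String> stored = new HashSet<>();
        for (Map.Entry<String, String> e : flowsByDevice.entrySet()) {
            if (!e.getKey().equals("of:broken")) {  // simulate one device failing to back up
                stored.add(e.getKey());
            }
        }
        return CompletableFuture.completedFuture(stored);
    }

    public static void main(String[] args) {
        Map<String, String> toBackup = new HashMap<>();
        toBackup.put("of:0000000000000001", "flows-1");
        toBackup.put("of:broken", "flows-2");

        Map<String, Long> lastBackupTimes = new HashMap<>();

        sendAndReceive(toBackup).whenComplete((backedUp, error) -> {
            // On a transport error nothing was backed up; otherwise only
            // the acknowledged subset was.
            Set<String> notBackedUp = new HashSet<>(toBackup.keySet());
            if (error == null) {
                notBackedUp.removeAll(backedUp);
            }
            if (!notBackedUp.isEmpty()) {
                System.out.println("Failed to backup devices: " + notBackedUp);
            }
            // Record backup state only for devices the receiver confirmed.
            if (backedUp != null) {
                backedUp.forEach(id -> lastBackupTimes.put(id, System.currentTimeMillis()));
            }
            System.out.println("Backup times recorded for: " + lastBackupTimes.keySet());
        });
    }
}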
Showing 1 changed file with 33 additions and 14 deletions
@@ -290,7 +290,7 @@ public class NewDistributedFlowRuleStore
         clusterCommunicator.addSubscriber(
                 REMOVE_FLOW_ENTRY, SERIALIZER::decode, this::removeFlowRuleInternal, SERIALIZER::encode, executor);
         clusterCommunicator.addSubscriber(
-                FLOW_TABLE_BACKUP, SERIALIZER::decode, flowTable::onBackupReceipt, executor);
+                FLOW_TABLE_BACKUP, SERIALIZER::decode, flowTable::onBackupReceipt, SERIALIZER::encode, executor);
     }

     private void unregisterMessageHandlers() {
@@ -644,21 +644,33 @@ public class NewDistributedFlowRuleStore

     private void backupFlowEntries(NodeId nodeId, Set<DeviceId> deviceIds) {
         log.debug("Sending flowEntries for devices {} to {} as backup.", deviceIds, nodeId);
-        Map<DeviceId, ConcurrentMap<FlowId, Set<StoredFlowEntry>>> deviceFlowEntries =
+        Map<DeviceId, Map<FlowId, Set<StoredFlowEntry>>> deviceFlowEntries =
                 Maps.newConcurrentMap();
         flowEntries.forEach((key, value) -> {
             if (deviceIds.contains(key)) {
                 deviceFlowEntries.put(key, value);
             }
         });
-        clusterCommunicator.unicast(deviceFlowEntries,
-                FLOW_TABLE_BACKUP,
-                SERIALIZER::encode,
-                nodeId);
-        deviceIds.forEach(id -> {
-            lastBackupTimes.put(id, System.currentTimeMillis());
-            lastBackupNodes.put(id, nodeId);
-        });
+        clusterCommunicator.<Map<DeviceId, Map<FlowId, Set<StoredFlowEntry>>>, Set<DeviceId>>sendAndReceive(
+                deviceFlowEntries,
+                FLOW_TABLE_BACKUP,
+                SERIALIZER::encode,
+                SERIALIZER::decode,
+                nodeId)
+                .whenComplete((backedupDevices, error) -> {
+                    Set<DeviceId> devicesNotBackedup = error != null ?
+                            deviceFlowEntries.keySet() :
+                            Sets.difference(deviceFlowEntries.keySet(), backedupDevices);
+                    if (devicesNotBackedup.size() > 0) {
+                        log.warn("Failed to backup devices: {}", devicesNotBackedup, error);
+                    }
+                    if (backedupDevices != null) {
+                        backedupDevices.forEach(id -> {
+                            lastBackupTimes.put(id, System.currentTimeMillis());
+                            lastBackupNodes.put(id, nodeId);
+                        });
+                    }
+                });
     }

     /**
@@ -751,16 +763,23 @@ public class NewDistributedFlowRuleStore
         }
     }

-    private void onBackupReceipt(Map<DeviceId, Map<FlowId, Set<StoredFlowEntry>>> flowTables) {
+    private Set<DeviceId> onBackupReceipt(Map<DeviceId, Map<FlowId, Set<StoredFlowEntry>>> flowTables) {
         log.debug("Received flowEntries for {} to backup", flowTables.keySet());
-        Set<DeviceId> managedDevices = mastershipService.getDevicesOf(local);
-        // Only process those devices are that not managed by the local node.
-        Maps.filterKeys(flowTables, deviceId -> !managedDevices.contains(deviceId))
+        Set<DeviceId> backedupDevices = Sets.newHashSet();
+        try {
+            Set<DeviceId> managedDevices = mastershipService.getDevicesOf(local);
+            // Only process those devices that are not managed by the local node.
+            Maps.filterKeys(flowTables, deviceId -> !managedDevices.contains(deviceId))
                 .forEach((deviceId, flowTable) -> {
                     Map<FlowId, Set<StoredFlowEntry>> deviceFlowTable = getFlowTable(deviceId);
                     deviceFlowTable.clear();
                     deviceFlowTable.putAll(flowTable);
+                    backedupDevices.add(deviceId);
                 });
+        } catch (Exception e) {
+            log.warn("Failure processing backup request", e);
+        }
+        return backedupDevices;
     }
 }
 }
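On the receiver side, onBackupReceipt now returns a value, which is why the first hunk adds a SERIALIZER::encode to the subscriber registration: the handler's reply must be encoded back to the sender. The handler also acknowledges per device and catches exceptions, so a failure partway through still reports whatever was stored before the error; devices left unacknowledged keep their stale lastBackupTimes and so remain candidates for a later backup attempt. Below is a minimal sketch of that receiver contract, using simplified stand-in types (String device ids, Long flow ids) rather than the ONOS classes.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class BackupReceiptSketch {

    // Stand-in for the node's backup flow-table store.
    static final Map<String, Map<Long, String>> STORE = new HashMap<>();

    static Set<String> onBackupReceipt(Map<String, Map<Long, String>> flowTables) {
        Set<String> backedUp = new HashSet<>();
        try {
            for (Map.Entry<String, Map<Long, String>> entry : flowTables.entrySet()) {
                Map<Long, String> table = STORE.computeIfAbsent(entry.getKey(), k -> new HashMap<>());
                table.clear();
                table.putAll(entry.getValue());
                backedUp.add(entry.getKey());  // acknowledge each device only after it is stored
            }
        } catch (Exception e) {
            // Partial failure: devices stored before the error stay acknowledged.
            System.out.println("Failure processing backup request: " + e);
        }
        return backedUp;
    }

    public static void main(String[] args) {
        Map<String, Map<Long, String>> request = new HashMap<>();
        request.put("of:0000000000000001", Map.of(1L, "flow-rule"));
        System.out.println("Acknowledged devices: " + onBackupReceipt(request));
    }
}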