Brian O'Connor
Committed by Gerrit Code Review

cleaning up some TODOs

Change-Id: Ib2380e9533ba30c6f9fdf79aed1879dbbe3589a7
......@@ -194,8 +194,8 @@ public class ObjectiveTracker implements ObjectiveTrackerService {
|| (linkEvent.type() == LINK_UPDATED &&
linkEvent.subject().isDurable())) {
final LinkKey linkKey = linkKey(linkEvent.subject());
Set<IntentId> intentIds = intentsByLink.get(linkKey);
synchronized (intentsByLink) {
Set<IntentId> intentIds = intentsByLink.get(linkKey);
log.debug("recompile triggered by LinkDown {} {}", linkKey, intentIds);
toBeRecompiled.addAll(intentIds);
}
......@@ -206,7 +206,6 @@ public class ObjectiveTracker implements ObjectiveTrackerService {
linkEvent.subject().isDurable()));
}
}
delegate.triggerCompile(toBeRecompiled, !recompileOnly);
}
}
......
......@@ -175,7 +175,7 @@ public class HazelcastLeadershipService implements LeadershipService,
for (Topic topic : topics.values()) {
Leadership leadership = new Leadership(topic.topicName(),
topic.leader(),
0L); // TODO: epoch not used
0L);
result.put(topic.topicName(), leadership);
}
return result;
......
......@@ -51,7 +51,6 @@ public class DistributedIdBlockStore implements IdBlockStore {
@Override
public IdBlock getIdBlock(String topic) {
//TODO need to persist this value across cluster failures
Long blockBase = theInstance.getAtomicLong(topic).getAndAdd(DEFAULT_BLOCK_SIZE);
return new IdBlock(blockBase, DEFAULT_BLOCK_SIZE);
}
......
......@@ -130,7 +130,6 @@ public class DistributedFlowRuleStore
private final AtomicInteger localBatchIdGen = new AtomicInteger();
// TODO: make this configurable
private int pendingFutureTimeoutMinutes = 5;
private Cache<Integer, SettableFuture<CompletedBatchOperation>> pendingFutures =
......@@ -149,7 +148,6 @@ public class DistributedFlowRuleStore
private final ExecutorService backupExecutors =
Executors.newSingleThreadExecutor(namedThreads("async-backups"));
// TODO make this configurable
private boolean syncBackup = false;
protected static final StoreSerializer SERIALIZER = new KryoSerializer() {
......@@ -163,7 +161,6 @@ public class DistributedFlowRuleStore
}
};
// TODO: make this configurable
private static final long FLOW_RULE_STORE_TIMEOUT_MILLIS = 5000;
private ReplicaInfoEventListener replicaInfoEventListener;
......@@ -247,7 +244,7 @@ public class DistributedFlowRuleStore
}
// TODO: This is not an efficient operation on a distributed sharded
// This is not an efficient operation on a distributed sharded
// flow store. We need to revisit the need for this operation or at least
// make it device specific.
@Override
......@@ -267,7 +264,6 @@ public class DistributedFlowRuleStore
if (!replicaInfo.master().isPresent()) {
log.warn("Failed to getFlowEntry: No master for {}", rule.deviceId());
// TODO: should we try returning from backup?
return null;
}
......@@ -287,9 +283,9 @@ public class DistributedFlowRuleStore
Future<byte[]> responseFuture = clusterCommunicator.sendAndReceive(message, replicaInfo.master().get());
return SERIALIZER.decode(responseFuture.get(FLOW_RULE_STORE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS));
} catch (IOException | TimeoutException | ExecutionException | InterruptedException e) {
// FIXME: throw a FlowStoreException
throw new RuntimeException(e);
log.warn("Unable to fetch flow store contents from {}", replicaInfo.master().get());
}
return null;
}
private StoredFlowEntry getFlowEntryInternal(FlowRule rule) {
......@@ -313,7 +309,6 @@ public class DistributedFlowRuleStore
if (!replicaInfo.master().isPresent()) {
log.warn("Failed to getFlowEntries: No master for {}", deviceId);
// TODO: should we try returning from backup?
return Collections.emptyList();
}
......@@ -333,9 +328,9 @@ public class DistributedFlowRuleStore
Future<byte[]> responseFuture = clusterCommunicator.sendAndReceive(message, replicaInfo.master().get());
return SERIALIZER.decode(responseFuture.get(FLOW_RULE_STORE_TIMEOUT_MILLIS, TimeUnit.MILLISECONDS));
} catch (IOException | TimeoutException | ExecutionException | InterruptedException e) {
// FIXME: throw a FlowStoreException
throw new RuntimeException(e);
log.warn("Unable to fetch flow store contents from {}", replicaInfo.master().get());
}
return null;
}
private Set<FlowEntry> getFlowEntriesInternal(DeviceId deviceId) {
......
......@@ -88,7 +88,7 @@ public class HazelcastIntentBatchQueue
private IntentBatchDelegate delegate;
private InternalLeaderListener leaderListener = new InternalLeaderListener();
private final Map<ApplicationId, SQueue<IntentOperations>> batchQueues
= Maps.newHashMap(); // FIXME make distributed?
= Maps.newHashMap();
private final Set<ApplicationId> myTopics = Sets.newHashSet();
private final Map<ApplicationId, IntentOperations> outstandingOps
= Maps.newHashMap();
......@@ -158,7 +158,7 @@ public class HazelcastIntentBatchQueue
public void addIntentOperations(IntentOperations ops) {
checkNotNull(ops, "Intent operations cannot be null.");
ApplicationId appId = ops.appId();
getQueue(appId).add(ops); // TODO consider using put here
getQueue(appId).add(ops);
dispatchNextOperation(appId);
}
......@@ -175,7 +175,6 @@ public class HazelcastIntentBatchQueue
log.warn("Operation {} not found", ops);
}
SQueue<IntentOperations> queue = batchQueues.get(appId);
// TODO consider alternatives to remove
checkState(queue.remove().equals(ops),
"Operations are wrong.");
outstandingOps.remove(appId);
......@@ -214,7 +213,6 @@ public class HazelcastIntentBatchQueue
*/
private void leaderChanged(String topic, boolean leader) {
ApplicationId appId = getAppId(topic);
//TODO we are using the event caller's thread, should we use our own?
synchronized (this) {
if (leader) {
myTopics.add(appId);
......
......@@ -99,7 +99,6 @@ public class HazelcastIntentStore
@Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY)
protected MetricsService metricsService;
// TODO make this configurable
private boolean onlyLogTransitionError = true;
private Timer createIntentTimer;
......@@ -142,7 +141,7 @@ public class HazelcastIntentStore
getIntentTimer = createResponseTimer("getIntent");
getIntentStateTimer = createResponseTimer("getIntentState");
// FIXME: We need a way to add serializers for intents which have been plugged in.
// We need a way to add serializers for intents which have been plugged in.
// As a short term workaround, relax Kryo config to
// registrationRequired=false
super.activate();
......@@ -164,7 +163,6 @@ public class HazelcastIntentStore
MapConfig intentsCfg = config.getMapConfig(INTENTS_MAP_NAME);
intentsCfg.setAsyncBackupCount(MapConfig.MAX_BACKUP_COUNT - intentsCfg.getBackupCount());
// TODO: enable near cache, allow read from backup for this IMap
IMap<byte[], byte[]> rawIntents = super.theInstance.getMap(INTENTS_MAP_NAME);
intents = new SMap<>(rawIntents , super.serializer);
intentsListenerId = intents.addEntryListener(new RemoteIntentsListener(), true);
......@@ -556,7 +554,6 @@ public class HazelcastIntentStore
}
log.trace("{} - {} -> {}", intentId, prevIntentState, newState);
// TODO sanity check and log?
} catch (InterruptedException e) {
log.error("Batch write was interrupted while processing {}", op, e);
failed.add(op);
......
......@@ -164,7 +164,6 @@ public class GossipLinkStore
backgroundExecutors =
newSingleThreadScheduledExecutor(minPriority(namedThreads("link-bg-%d")));
// TODO: Make these configurable
long initialDelaySec = 5;
long periodSec = 5;
// start anti-entropy thread
......
......@@ -48,8 +48,10 @@ import org.slf4j.Logger;
/**
* Manages inventory of topology snapshots using trivial in-memory
* structures implementation.
*
* Note: This component is not distributed per-se. It runs on every
* instance and feeds off of other distributed stores.
*/
//FIXME: I LIE I AM NOT DISTRIBUTED
@Component(immediate = true)
@Service
public class DistributedTopologyStore
......
......@@ -37,7 +37,6 @@ public class Ip4AddressSerializer extends Serializer<Ip4Address> {
@Override
public void write(Kryo kryo, Output output, Ip4Address object) {
byte[] octs = object.toOctets();
// TODO: Writing (and reading) the number of octets is redundant:
// It is always Ip4Address.BYTE_LENGTH
output.writeInt(octs.length);
output.writeBytes(octs);
......
......@@ -39,7 +39,6 @@ public final class Ip4PrefixSerializer extends Serializer<Ip4Prefix> {
public void write(Kryo kryo, Output output,
Ip4Prefix object) {
byte[] octs = object.address().toOctets();
// TODO: Writing (and reading) the number of octets is redundant:
// It is always Ip4Address.BYTE_LENGTH
output.writeInt(octs.length);
output.writeBytes(octs);
......
......@@ -37,7 +37,6 @@ public class Ip6AddressSerializer extends Serializer<Ip6Address> {
@Override
public void write(Kryo kryo, Output output, Ip6Address object) {
byte[] octs = object.toOctets();
// TODO: Writing (and reading) the number of octets is redundant:
// It is always Ip6Address.BYTE_LENGTH
output.writeInt(octs.length);
output.writeBytes(octs);
......
......@@ -39,7 +39,6 @@ public final class Ip6PrefixSerializer extends Serializer<Ip6Prefix> {
public void write(Kryo kryo, Output output,
Ip6Prefix object) {
byte[] octs = object.address().toOctets();
// TODO: Writing (and reading) the number of octets is redundant:
// It is always Ip6Address.BYTE_LENGTH
output.writeInt(octs.length);
output.writeBytes(octs);
......