Committed by
Gerrit Code Review
Removing hazelcast dependency throughout.
Change-Id: I738050fda142418d2956f613035892dac82ef098
Showing
30 changed files
with
34 additions
and
2193 deletions
... | @@ -71,7 +71,7 @@ | ... | @@ -71,7 +71,7 @@ |
71 | <dependency> | 71 | <dependency> |
72 | <groupId>org.mapdb</groupId> | 72 | <groupId>org.mapdb</groupId> |
73 | <artifactId>mapdb</artifactId> | 73 | <artifactId>mapdb</artifactId> |
74 | - <version>1.0.7</version> | 74 | + <version>1.0.7</version> |
75 | </dependency> | 75 | </dependency> |
76 | 76 | ||
77 | <dependency> | 77 | <dependency> |
... | @@ -93,31 +93,21 @@ | ... | @@ -93,31 +93,21 @@ |
93 | <artifactId>commons-lang3</artifactId> | 93 | <artifactId>commons-lang3</artifactId> |
94 | </dependency> | 94 | </dependency> |
95 | <dependency> | 95 | <dependency> |
96 | - <groupId>org.easymock</groupId> | 96 | + <groupId>org.easymock</groupId> |
97 | - <artifactId>easymock</artifactId> | 97 | + <artifactId>easymock</artifactId> |
98 | - <scope>test</scope> | 98 | + <scope>test</scope> |
99 | - </dependency> | ||
100 | - <dependency> | ||
101 | - <groupId>org.onosproject</groupId> | ||
102 | - <artifactId>onos-api</artifactId> | ||
103 | - <classifier>tests</classifier> | ||
104 | - <scope>test</scope> | ||
105 | - </dependency> | ||
106 | - <dependency> | ||
107 | - <groupId>com.hazelcast</groupId> | ||
108 | - <artifactId>hazelcast</artifactId> | ||
109 | </dependency> | 99 | </dependency> |
110 | <dependency> | 100 | <dependency> |
111 | - <groupId>com.hazelcast</groupId> | 101 | + <groupId>org.onosproject</groupId> |
112 | - <artifactId>hazelcast</artifactId> | 102 | + <artifactId>onos-api</artifactId> |
113 | - <classifier>tests</classifier> | 103 | + <classifier>tests</classifier> |
114 | - <scope>test</scope> | 104 | + <scope>test</scope> |
115 | </dependency> | 105 | </dependency> |
116 | 106 | ||
117 | <!-- for shaded copycat --> | 107 | <!-- for shaded copycat --> |
118 | <dependency> | 108 | <dependency> |
119 | - <groupId>org.onosproject</groupId> | 109 | + <groupId>org.onosproject</groupId> |
120 | - <artifactId>onlab-thirdparty</artifactId> | 110 | + <artifactId>onlab-thirdparty</artifactId> |
121 | </dependency> | 111 | </dependency> |
122 | </dependencies> | 112 | </dependencies> |
123 | 113 | ... | ... |
1 | package org.onosproject.store.cluster.impl; | 1 | package org.onosproject.store.cluster.impl; |
2 | 2 | ||
3 | -import static com.hazelcast.util.AddressUtil.matchInterface; | ||
4 | import static java.net.NetworkInterface.getNetworkInterfaces; | 3 | import static java.net.NetworkInterface.getNetworkInterfaces; |
5 | import static java.util.Collections.list; | 4 | import static java.util.Collections.list; |
6 | import static org.onosproject.cluster.DefaultControllerNode.DEFAULT_PORT; | 5 | import static org.onosproject.cluster.DefaultControllerNode.DEFAULT_PORT; |
... | @@ -31,7 +30,6 @@ import org.slf4j.Logger; | ... | @@ -31,7 +30,6 @@ import org.slf4j.Logger; |
31 | 30 | ||
32 | import com.google.common.collect.ImmutableSet; | 31 | import com.google.common.collect.ImmutableSet; |
33 | import com.google.common.collect.Sets; | 32 | import com.google.common.collect.Sets; |
34 | -import com.hazelcast.util.AddressUtil; | ||
35 | 33 | ||
36 | /** | 34 | /** |
37 | * Implementation of ClusterDefinitionService. | 35 | * Implementation of ClusterDefinitionService. |
... | @@ -115,7 +113,7 @@ public class ClusterDefinitionManager implements ClusterDefinitionService { | ... | @@ -115,7 +113,7 @@ public class ClusterDefinitionManager implements ClusterDefinitionService { |
115 | Enumeration<InetAddress> inetAddresses = iface.getInetAddresses(); | 113 | Enumeration<InetAddress> inetAddresses = iface.getInetAddresses(); |
116 | while (inetAddresses.hasMoreElements()) { | 114 | while (inetAddresses.hasMoreElements()) { |
117 | IpAddress ip = IpAddress.valueOf(inetAddresses.nextElement()); | 115 | IpAddress ip = IpAddress.valueOf(inetAddresses.nextElement()); |
118 | - if (AddressUtil.matchInterface(ip.toString(), clusterDefinition.getIpPrefix())) { | 116 | + if (matchInterface(ip.toString(), clusterDefinition.getIpPrefix())) { |
119 | return ip; | 117 | return ip; |
120 | } | 118 | } |
121 | } | 119 | } |
... | @@ -169,4 +167,11 @@ public class ClusterDefinitionManager implements ClusterDefinitionService { | ... | @@ -169,4 +167,11 @@ public class ClusterDefinitionManager implements ClusterDefinitionService { |
169 | 167 | ||
170 | return IpAddress.valueOf(InetAddress.getLoopbackAddress()).toString(); | 168 | return IpAddress.valueOf(InetAddress.getLoopbackAddress()).toString(); |
171 | } | 169 | } |
170 | + | ||
171 | + // Indicates whether the specified interface address matches the given prefix. | ||
172 | + // FIXME: Add a facility to IpPrefix to make this more robust | ||
173 | + private static boolean matchInterface(String ip, String ipPrefix) { | ||
174 | + String s = ipPrefix.replaceAll("\\.\\*", ""); | ||
175 | + return ip.startsWith(s); | ||
176 | + } | ||
172 | } | 177 | } | ... | ... |
core/store/dist/src/main/java/org/onosproject/store/cluster/impl/HazelcastClusterStore.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014-2015 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.cluster.impl; | ||
17 | - | ||
18 | -import com.google.common.base.Optional; | ||
19 | -import com.google.common.cache.LoadingCache; | ||
20 | -import com.google.common.collect.ImmutableSet; | ||
21 | -import com.google.common.collect.Maps; | ||
22 | -import com.hazelcast.core.IMap; | ||
23 | -import com.hazelcast.core.Member; | ||
24 | -import com.hazelcast.core.MemberAttributeEvent; | ||
25 | -import com.hazelcast.core.MembershipEvent; | ||
26 | -import com.hazelcast.core.MembershipListener; | ||
27 | -import org.apache.felix.scr.annotations.Activate; | ||
28 | -import org.apache.felix.scr.annotations.Component; | ||
29 | -import org.apache.felix.scr.annotations.Deactivate; | ||
30 | -import org.apache.felix.scr.annotations.Service; | ||
31 | -import org.joda.time.DateTime; | ||
32 | -import org.onlab.packet.IpAddress; | ||
33 | -import org.onosproject.cluster.ClusterEvent; | ||
34 | -import org.onosproject.cluster.ClusterStore; | ||
35 | -import org.onosproject.cluster.ClusterStoreDelegate; | ||
36 | -import org.onosproject.cluster.ControllerNode; | ||
37 | -import org.onosproject.cluster.DefaultControllerNode; | ||
38 | -import org.onosproject.cluster.NodeId; | ||
39 | -import org.onosproject.store.hz.AbsentInvalidatingLoadingCache; | ||
40 | -import org.onosproject.store.hz.AbstractHazelcastStore; | ||
41 | -import org.onosproject.store.hz.OptionalCacheLoader; | ||
42 | - | ||
43 | -import java.util.Map; | ||
44 | -import java.util.Set; | ||
45 | -import java.util.concurrent.ConcurrentHashMap; | ||
46 | - | ||
47 | -import static com.google.common.cache.CacheBuilder.newBuilder; | ||
48 | -import static org.onosproject.cluster.ClusterEvent.Type.INSTANCE_ACTIVATED; | ||
49 | -import static org.onosproject.cluster.ClusterEvent.Type.INSTANCE_DEACTIVATED; | ||
50 | -import static org.onosproject.cluster.ControllerNode.State; | ||
51 | - | ||
52 | -/** | ||
53 | - * Distributed, Hazelcast-based implementation of the cluster nodes store. | ||
54 | - */ | ||
55 | -@Component(immediate = true, enabled = false) | ||
56 | -@Service | ||
57 | -public class HazelcastClusterStore | ||
58 | - extends AbstractHazelcastStore<ClusterEvent, ClusterStoreDelegate> | ||
59 | - implements ClusterStore { | ||
60 | - | ||
61 | - private IMap<byte[], byte[]> rawNodes; | ||
62 | - private LoadingCache<NodeId, Optional<DefaultControllerNode>> nodes; | ||
63 | - | ||
64 | - private String listenerId; | ||
65 | - private final MembershipListener listener = new InternalMembershipListener(); | ||
66 | - private final Map<NodeId, State> states = new ConcurrentHashMap<>(); | ||
67 | - private final Map<NodeId, DateTime> lastUpdatedTimes = Maps.newConcurrentMap(); | ||
68 | - | ||
69 | - private String nodesListenerId; | ||
70 | - | ||
71 | - @Override | ||
72 | - @Activate | ||
73 | - public void activate() { | ||
74 | - super.activate(); | ||
75 | - listenerId = theInstance.getCluster().addMembershipListener(listener); | ||
76 | - | ||
77 | - rawNodes = theInstance.getMap("nodes"); | ||
78 | - OptionalCacheLoader<NodeId, DefaultControllerNode> nodeLoader | ||
79 | - = new OptionalCacheLoader<>(serializer, rawNodes); | ||
80 | - nodes = new AbsentInvalidatingLoadingCache<>(newBuilder().build(nodeLoader)); | ||
81 | - nodesListenerId = rawNodes.addEntryListener(new RemoteCacheEventHandler<>(nodes), true); | ||
82 | - | ||
83 | - loadClusterNodes(); | ||
84 | - | ||
85 | - log.info("Started"); | ||
86 | - } | ||
87 | - | ||
88 | - // Loads the initial set of cluster nodes | ||
89 | - private void loadClusterNodes() { | ||
90 | - for (Member member : theInstance.getCluster().getMembers()) { | ||
91 | - addNode(node(member)); | ||
92 | - } | ||
93 | - } | ||
94 | - | ||
95 | - @Deactivate | ||
96 | - public void deactivate() { | ||
97 | - rawNodes.removeEntryListener(nodesListenerId); | ||
98 | - theInstance.getCluster().removeMembershipListener(listenerId); | ||
99 | - log.info("Stopped"); | ||
100 | - } | ||
101 | - | ||
102 | - @Override | ||
103 | - public ControllerNode getLocalNode() { | ||
104 | - return node(theInstance.getCluster().getLocalMember()); | ||
105 | - } | ||
106 | - | ||
107 | - @Override | ||
108 | - public Set<ControllerNode> getNodes() { | ||
109 | - ImmutableSet.Builder<ControllerNode> builder = ImmutableSet.builder(); | ||
110 | - for (Optional<DefaultControllerNode> optional : nodes.asMap().values()) { | ||
111 | - builder.add(optional.get()); | ||
112 | - } | ||
113 | - return builder.build(); | ||
114 | - } | ||
115 | - | ||
116 | - @Override | ||
117 | - public ControllerNode getNode(NodeId nodeId) { | ||
118 | - return nodes.getUnchecked(nodeId).orNull(); | ||
119 | - } | ||
120 | - | ||
121 | - @Override | ||
122 | - public State getState(NodeId nodeId) { | ||
123 | - State state = states.get(nodeId); | ||
124 | - return state == null ? State.INACTIVE : state; | ||
125 | - } | ||
126 | - | ||
127 | - @Override | ||
128 | - public DateTime getLastUpdated(NodeId nodeId) { | ||
129 | - return lastUpdatedTimes.get(nodeId); | ||
130 | - } | ||
131 | - | ||
132 | - @Override | ||
133 | - public ControllerNode addNode(NodeId nodeId, IpAddress ip, int tcpPort) { | ||
134 | - return addNode(new DefaultControllerNode(nodeId, ip, tcpPort)); | ||
135 | - } | ||
136 | - | ||
137 | - @Override | ||
138 | - public void removeNode(NodeId nodeId) { | ||
139 | - synchronized (this) { | ||
140 | - rawNodes.remove(serialize(nodeId)); | ||
141 | - nodes.invalidate(nodeId); | ||
142 | - } | ||
143 | - } | ||
144 | - | ||
145 | - // Adds a new node based on the specified member | ||
146 | - private synchronized ControllerNode addNode(DefaultControllerNode node) { | ||
147 | - rawNodes.put(serialize(node.id()), serialize(node)); | ||
148 | - nodes.put(node.id(), Optional.of(node)); | ||
149 | - updateState(node.id(), State.ACTIVE); | ||
150 | - return node; | ||
151 | - } | ||
152 | - | ||
153 | - // Creates a controller node descriptor from the Hazelcast member. | ||
154 | - private DefaultControllerNode node(Member member) { | ||
155 | - IpAddress ip = memberAddress(member); | ||
156 | - return new DefaultControllerNode(new NodeId(ip.toString()), ip); | ||
157 | - } | ||
158 | - | ||
159 | - private IpAddress memberAddress(Member member) { | ||
160 | - return IpAddress.valueOf(member.getSocketAddress().getAddress()); | ||
161 | - } | ||
162 | - | ||
163 | - private void updateState(NodeId nodeId, State newState) { | ||
164 | - updateState(nodeId, newState); | ||
165 | - lastUpdatedTimes.put(nodeId, DateTime.now()); | ||
166 | - } | ||
167 | - | ||
168 | - // Interceptor for membership events. | ||
169 | - private class InternalMembershipListener implements MembershipListener { | ||
170 | - @Override | ||
171 | - public void memberAdded(MembershipEvent membershipEvent) { | ||
172 | - log.info("Member {} added", membershipEvent.getMember()); | ||
173 | - ControllerNode node = addNode(node(membershipEvent.getMember())); | ||
174 | - notifyDelegate(new ClusterEvent(INSTANCE_ACTIVATED, node)); | ||
175 | - } | ||
176 | - | ||
177 | - @Override | ||
178 | - public void memberRemoved(MembershipEvent membershipEvent) { | ||
179 | - log.info("Member {} removed", membershipEvent.getMember()); | ||
180 | - NodeId nodeId = new NodeId(memberAddress(membershipEvent.getMember()).toString()); | ||
181 | - updateState(nodeId, State.INACTIVE); | ||
182 | - notifyDelegate(new ClusterEvent(INSTANCE_DEACTIVATED, getNode(nodeId))); | ||
183 | - } | ||
184 | - | ||
185 | - @Override | ||
186 | - public void memberAttributeChanged(MemberAttributeEvent memberAttributeEvent) { | ||
187 | - log.info("Member {} attribute {} changed to {}", | ||
188 | - memberAttributeEvent.getMember(), | ||
189 | - memberAttributeEvent.getKey(), | ||
190 | - memberAttributeEvent.getValue()); | ||
191 | - } | ||
192 | - } | ||
193 | -} |
This diff is collapsed. Click to expand it.
1 | -/* | ||
2 | - * Copyright 2014-2015 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.core.impl; | ||
17 | - | ||
18 | -import static org.apache.commons.lang3.concurrent.ConcurrentUtils.putIfAbsent; | ||
19 | - | ||
20 | -import com.google.common.collect.ImmutableSet; | ||
21 | -import com.hazelcast.core.EntryEvent; | ||
22 | -import com.hazelcast.core.EntryListener; | ||
23 | -import com.hazelcast.core.IAtomicLong; | ||
24 | -import com.hazelcast.core.MapEvent; | ||
25 | - | ||
26 | -import org.apache.felix.scr.annotations.Activate; | ||
27 | -import org.apache.felix.scr.annotations.Component; | ||
28 | -import org.apache.felix.scr.annotations.Deactivate; | ||
29 | -import org.apache.felix.scr.annotations.Service; | ||
30 | -import org.onosproject.core.ApplicationId; | ||
31 | -import org.onosproject.core.ApplicationIdStore; | ||
32 | -import org.onosproject.core.DefaultApplicationId; | ||
33 | -import org.onosproject.store.hz.AbstractHazelcastStore; | ||
34 | -import org.onosproject.store.hz.SMap; | ||
35 | -import org.onosproject.store.serializers.KryoNamespaces; | ||
36 | -import org.onosproject.store.serializers.KryoSerializer; | ||
37 | -import org.onlab.util.KryoNamespace; | ||
38 | - | ||
39 | -import java.util.Map; | ||
40 | -import java.util.Set; | ||
41 | -import java.util.concurrent.ConcurrentHashMap; | ||
42 | - | ||
43 | -/** | ||
44 | - * Simple implementation of the application ID registry using in-memory | ||
45 | - * structures. | ||
46 | - */ | ||
47 | -@Component(immediate = false, enabled = false) | ||
48 | -@Service | ||
49 | -public class DistributedApplicationIdStore | ||
50 | - extends AbstractHazelcastStore<AppIdEvent, AppIdStoreDelegate> | ||
51 | - implements ApplicationIdStore { | ||
52 | - | ||
53 | - protected IAtomicLong lastAppId; | ||
54 | - protected SMap<String, DefaultApplicationId> appIdsByName; | ||
55 | - | ||
56 | - protected Map<Short, DefaultApplicationId> appIds = new ConcurrentHashMap<>(); | ||
57 | - | ||
58 | - private String listenerId; | ||
59 | - | ||
60 | - | ||
61 | - @Override | ||
62 | - @Activate | ||
63 | - public void activate() { | ||
64 | - super.activate(); | ||
65 | - | ||
66 | - this.serializer = new KryoSerializer() { | ||
67 | - @Override | ||
68 | - protected void setupKryoPool() { | ||
69 | - serializerPool = KryoNamespace.newBuilder() | ||
70 | - .register(KryoNamespaces.API) | ||
71 | - .nextId(KryoNamespaces.BEGIN_USER_CUSTOM_ID) | ||
72 | - .build(); | ||
73 | - } | ||
74 | - }; | ||
75 | - | ||
76 | - lastAppId = theInstance.getAtomicLong("applicationId"); | ||
77 | - | ||
78 | - appIdsByName = new SMap<>(theInstance.<byte[], byte[]>getMap("appIdsByName"), this.serializer); | ||
79 | - listenerId = appIdsByName.addEntryListener((new RemoteAppIdEventHandler()), true); | ||
80 | - | ||
81 | - primeAppIds(); | ||
82 | - | ||
83 | - log.info("Started"); | ||
84 | - } | ||
85 | - | ||
86 | - @Deactivate | ||
87 | - public void deactivate() { | ||
88 | - appIdsByName.removeEntryListener(listenerId); | ||
89 | - log.info("Stopped"); | ||
90 | - } | ||
91 | - | ||
92 | - @Override | ||
93 | - public Set<ApplicationId> getAppIds() { | ||
94 | - return ImmutableSet.<ApplicationId>copyOf(appIds.values()); | ||
95 | - } | ||
96 | - | ||
97 | - @Override | ||
98 | - public ApplicationId getAppId(Short id) { | ||
99 | - ApplicationId appId = appIds.get(id); | ||
100 | - if (appId == null) { | ||
101 | - primeAppIds(); | ||
102 | - return appIds.get(id); | ||
103 | - } | ||
104 | - return appId; | ||
105 | - } | ||
106 | - | ||
107 | - @Override | ||
108 | - public ApplicationId getAppId(String name) { | ||
109 | - return appIdsByName.get(name); | ||
110 | - } | ||
111 | - | ||
112 | - private void primeAppIds() { | ||
113 | - for (DefaultApplicationId appId : appIdsByName.values()) { | ||
114 | - appIds.putIfAbsent(appId.id(), appId); | ||
115 | - } | ||
116 | - } | ||
117 | - | ||
118 | - @Override | ||
119 | - public ApplicationId registerApplication(String name) { | ||
120 | - DefaultApplicationId appId = appIdsByName.get(name); | ||
121 | - if (appId == null) { | ||
122 | - int id = (int) lastAppId.getAndIncrement(); | ||
123 | - appId = putIfAbsent(appIdsByName, name, | ||
124 | - new DefaultApplicationId(id, name)); | ||
125 | - } | ||
126 | - return appId; | ||
127 | - } | ||
128 | - | ||
129 | - private class RemoteAppIdEventHandler implements EntryListener<String, DefaultApplicationId> { | ||
130 | - @Override | ||
131 | - public void entryAdded(EntryEvent<String, DefaultApplicationId> event) { | ||
132 | - DefaultApplicationId appId = event.getValue(); | ||
133 | - appIds.put(appId.id(), appId); | ||
134 | - } | ||
135 | - | ||
136 | - @Override | ||
137 | - public void entryRemoved(EntryEvent<String, DefaultApplicationId> event) { | ||
138 | - } | ||
139 | - | ||
140 | - @Override | ||
141 | - public void entryUpdated(EntryEvent<String, DefaultApplicationId> event) { | ||
142 | - entryAdded(event); | ||
143 | - } | ||
144 | - | ||
145 | - @Override | ||
146 | - public void entryEvicted(EntryEvent<String, DefaultApplicationId> event) { | ||
147 | - } | ||
148 | - | ||
149 | - @Override | ||
150 | - public void mapEvicted(MapEvent event) { | ||
151 | - } | ||
152 | - | ||
153 | - @Override | ||
154 | - public void mapCleared(MapEvent event) { | ||
155 | - } | ||
156 | - } | ||
157 | -} |
core/store/dist/src/main/java/org/onosproject/store/core/impl/DistributedIdBlockStore.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014-2015 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.core.impl; | ||
17 | - | ||
18 | -import com.hazelcast.core.HazelcastInstance; | ||
19 | -import com.hazelcast.core.IAtomicLong; | ||
20 | -import org.apache.felix.scr.annotations.Activate; | ||
21 | -import org.apache.felix.scr.annotations.Component; | ||
22 | -import org.apache.felix.scr.annotations.Reference; | ||
23 | -import org.apache.felix.scr.annotations.ReferenceCardinality; | ||
24 | -import org.apache.felix.scr.annotations.Service; | ||
25 | -import org.onosproject.core.IdBlock; | ||
26 | -import org.onosproject.core.IdBlockStore; | ||
27 | -import org.onosproject.store.hz.StoreService; | ||
28 | - | ||
29 | -import java.util.Map; | ||
30 | - | ||
31 | -/** | ||
32 | - * Distributed implementation of id block store using Hazelcast. | ||
33 | - */ | ||
34 | -@Component(immediate = false, enabled = false) | ||
35 | -@Service | ||
36 | -public class DistributedIdBlockStore implements IdBlockStore { | ||
37 | - | ||
38 | - private static final long DEFAULT_BLOCK_SIZE = 0x100000L; | ||
39 | - | ||
40 | - protected Map<String, IAtomicLong> topicBlocks; | ||
41 | - | ||
42 | - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
43 | - protected StoreService storeService; | ||
44 | - | ||
45 | - protected HazelcastInstance theInstance; | ||
46 | - | ||
47 | - @Activate | ||
48 | - public void activate() { | ||
49 | - theInstance = storeService.getHazelcastInstance(); | ||
50 | - } | ||
51 | - | ||
52 | - @Override | ||
53 | - public IdBlock getIdBlock(String topic) { | ||
54 | - Long blockBase = theInstance.getAtomicLong(topic).getAndAdd(DEFAULT_BLOCK_SIZE); | ||
55 | - return new IdBlock(blockBase, DEFAULT_BLOCK_SIZE); | ||
56 | - } | ||
57 | -} |
core/store/dist/src/main/java/org/onosproject/store/flow/impl/DistributedFlowRuleStore.java
deleted
100644 → 0
This diff is collapsed. Click to expand it.
core/store/dist/src/main/java/org/onosproject/store/hz/AbsentInvalidatingLoadingCache.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.hz; | ||
17 | - | ||
18 | -import java.util.concurrent.Callable; | ||
19 | -import java.util.concurrent.ExecutionException; | ||
20 | - | ||
21 | -import com.google.common.base.Optional; | ||
22 | -import com.google.common.cache.ForwardingLoadingCache.SimpleForwardingLoadingCache; | ||
23 | -import com.google.common.cache.LoadingCache; | ||
24 | - | ||
25 | -/** | ||
26 | - * Wrapper around LoadingCache to handle negative hit scenario. | ||
27 | - * <p> | ||
28 | - * When the LoadingCache returned Absent, | ||
29 | - * this implementation will invalidate the entry immediately to avoid | ||
30 | - * caching negative hits. | ||
31 | - * | ||
32 | - * @param <K> Cache key type | ||
33 | - * @param <V> Cache value type. (Optional{@literal <V>}) | ||
34 | - */ | ||
35 | -public class AbsentInvalidatingLoadingCache<K, V> extends | ||
36 | - SimpleForwardingLoadingCache<K, Optional<V>> { | ||
37 | - | ||
38 | - /** | ||
39 | - * Constructor. | ||
40 | - * | ||
41 | - * @param delegate actual {@link LoadingCache} to delegate loading. | ||
42 | - */ | ||
43 | - public AbsentInvalidatingLoadingCache(LoadingCache<K, Optional<V>> delegate) { | ||
44 | - super(delegate); | ||
45 | - } | ||
46 | - | ||
47 | - @Override | ||
48 | - public Optional<V> get(K key) throws ExecutionException { | ||
49 | - Optional<V> v = super.get(key); | ||
50 | - if (!v.isPresent()) { | ||
51 | - invalidate(key); | ||
52 | - } | ||
53 | - return v; | ||
54 | - } | ||
55 | - | ||
56 | - @Override | ||
57 | - public Optional<V> getUnchecked(K key) { | ||
58 | - Optional<V> v = super.getUnchecked(key); | ||
59 | - if (!v.isPresent()) { | ||
60 | - invalidate(key); | ||
61 | - } | ||
62 | - return v; | ||
63 | - } | ||
64 | - | ||
65 | - @Override | ||
66 | - public Optional<V> apply(K key) { | ||
67 | - return getUnchecked(key); | ||
68 | - } | ||
69 | - | ||
70 | - @Override | ||
71 | - public Optional<V> getIfPresent(Object key) { | ||
72 | - Optional<V> v = super.getIfPresent(key); | ||
73 | - if (!v.isPresent()) { | ||
74 | - invalidate(key); | ||
75 | - } | ||
76 | - return v; | ||
77 | - } | ||
78 | - | ||
79 | - @Override | ||
80 | - public Optional<V> get(K key, Callable<? extends Optional<V>> valueLoader) | ||
81 | - throws ExecutionException { | ||
82 | - | ||
83 | - Optional<V> v = super.get(key, valueLoader); | ||
84 | - if (!v.isPresent()) { | ||
85 | - invalidate(key); | ||
86 | - } | ||
87 | - return v; | ||
88 | - } | ||
89 | -} |
core/store/dist/src/main/java/org/onosproject/store/hz/AbstractHazelcastStore.java
deleted
100644 → 0
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.hz; | ||
17 | - | ||
18 | -import com.google.common.base.Optional; | ||
19 | -import com.google.common.cache.LoadingCache; | ||
20 | -import com.hazelcast.core.EntryAdapter; | ||
21 | -import com.hazelcast.core.EntryEvent; | ||
22 | -import com.hazelcast.core.HazelcastInstance; | ||
23 | -import com.hazelcast.core.MapEvent; | ||
24 | -import com.hazelcast.core.Member; | ||
25 | - | ||
26 | -import org.apache.felix.scr.annotations.Activate; | ||
27 | -import org.apache.felix.scr.annotations.Component; | ||
28 | -import org.apache.felix.scr.annotations.Reference; | ||
29 | -import org.apache.felix.scr.annotations.ReferenceCardinality; | ||
30 | -import org.onosproject.event.Event; | ||
31 | -import org.onosproject.store.AbstractStore; | ||
32 | -import org.onosproject.store.StoreDelegate; | ||
33 | -import org.onosproject.store.serializers.KryoSerializer; | ||
34 | -import org.onosproject.store.serializers.StoreSerializer; | ||
35 | -import org.slf4j.Logger; | ||
36 | - | ||
37 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
38 | -import static org.slf4j.LoggerFactory.getLogger; | ||
39 | - | ||
40 | -/** | ||
41 | - * Abstraction of a distributed store based on Hazelcast. | ||
42 | - */ | ||
43 | -@Component | ||
44 | -public abstract class AbstractHazelcastStore<E extends Event, D extends StoreDelegate<E>> | ||
45 | - extends AbstractStore<E, D> { | ||
46 | - | ||
47 | - protected final Logger log = getLogger(getClass()); | ||
48 | - | ||
49 | - @Reference(cardinality = ReferenceCardinality.MANDATORY_UNARY) | ||
50 | - protected StoreService storeService; | ||
51 | - | ||
52 | - protected StoreSerializer serializer; | ||
53 | - | ||
54 | - protected HazelcastInstance theInstance; | ||
55 | - | ||
56 | - @Activate | ||
57 | - public void activate() { | ||
58 | - serializer = new KryoSerializer(); | ||
59 | - theInstance = storeService.getHazelcastInstance(); | ||
60 | - } | ||
61 | - | ||
62 | - /** | ||
63 | - * Serializes the specified object using the backing store service. | ||
64 | - * | ||
65 | - * @param obj object to be serialized | ||
66 | - * @return serialized object | ||
67 | - */ | ||
68 | - protected byte[] serialize(Object obj) { | ||
69 | - return serializer.encode(obj); | ||
70 | - } | ||
71 | - | ||
72 | - /** | ||
73 | - * Deserializes the specified object using the backing store service. | ||
74 | - * | ||
75 | - * @param bytes bytes to be deserialized | ||
76 | - * @param <T> type of object | ||
77 | - * @return deserialized object | ||
78 | - */ | ||
79 | - protected <T> T deserialize(byte[] bytes) { | ||
80 | - return serializer.decode(bytes); | ||
81 | - } | ||
82 | - | ||
83 | - | ||
84 | - /** | ||
85 | - * An IMap entry listener, which reflects each remote event to the cache. | ||
86 | - * | ||
87 | - * @param <K> IMap key type after deserialization | ||
88 | - * @param <V> IMap value type after deserialization | ||
89 | - */ | ||
90 | - public class RemoteCacheEventHandler<K, V> extends EntryAdapter<byte[], byte[]> { | ||
91 | - | ||
92 | - private final Member localMember; | ||
93 | - private LoadingCache<K, Optional<V>> cache; | ||
94 | - | ||
95 | - /** | ||
96 | - * Constructor. | ||
97 | - * | ||
98 | - * @param cache cache to update | ||
99 | - */ | ||
100 | - public RemoteCacheEventHandler(LoadingCache<K, Optional<V>> cache) { | ||
101 | - this.localMember = theInstance.getCluster().getLocalMember(); | ||
102 | - this.cache = checkNotNull(cache); | ||
103 | - } | ||
104 | - | ||
105 | - @Override | ||
106 | - public void mapCleared(MapEvent event) { | ||
107 | - if (localMember.equals(event.getMember())) { | ||
108 | - // ignore locally triggered event | ||
109 | - return; | ||
110 | - } | ||
111 | - cache.invalidateAll(); | ||
112 | - } | ||
113 | - | ||
114 | - @Override | ||
115 | - public void entryAdded(EntryEvent<byte[], byte[]> event) { | ||
116 | - if (localMember.equals(event.getMember())) { | ||
117 | - // ignore locally triggered event | ||
118 | - return; | ||
119 | - } | ||
120 | - K key = deserialize(event.getKey()); | ||
121 | - V newVal = deserialize(event.getValue()); | ||
122 | - Optional<V> newValue = Optional.of(newVal); | ||
123 | - cache.asMap().putIfAbsent(key, newValue); | ||
124 | - onAdd(key, newVal); | ||
125 | - } | ||
126 | - | ||
127 | - @Override | ||
128 | - public void entryUpdated(EntryEvent<byte[], byte[]> event) { | ||
129 | - if (localMember.equals(event.getMember())) { | ||
130 | - // ignore locally triggered event | ||
131 | - return; | ||
132 | - } | ||
133 | - K key = deserialize(event.getKey()); | ||
134 | - V oldVal = deserialize(event.getOldValue()); | ||
135 | - Optional<V> oldValue = Optional.fromNullable(oldVal); | ||
136 | - V newVal = deserialize(event.getValue()); | ||
137 | - Optional<V> newValue = Optional.of(newVal); | ||
138 | - cache.asMap().replace(key, oldValue, newValue); | ||
139 | - onUpdate(key, oldVal, newVal); | ||
140 | - } | ||
141 | - | ||
142 | - @Override | ||
143 | - public void entryRemoved(EntryEvent<byte[], byte[]> event) { | ||
144 | - if (localMember.equals(event.getMember())) { | ||
145 | - // ignore locally triggered event | ||
146 | - return; | ||
147 | - } | ||
148 | - K key = deserialize(event.getKey()); | ||
149 | - V val = deserialize(event.getOldValue()); | ||
150 | - cache.invalidate(key); | ||
151 | - onRemove(key, val); | ||
152 | - } | ||
153 | - | ||
154 | - /** | ||
155 | - * Cache entry addition hook. | ||
156 | - * | ||
157 | - * @param key new key | ||
158 | - * @param newVal new value | ||
159 | - */ | ||
160 | - protected void onAdd(K key, V newVal) { | ||
161 | - } | ||
162 | - | ||
163 | - /** | ||
164 | - * Cache entry update hook. | ||
165 | - * | ||
166 | - * @param key new key | ||
167 | - * @param oldValue old value | ||
168 | - * @param newVal new value | ||
169 | - */ | ||
170 | - protected void onUpdate(K key, V oldValue, V newVal) { | ||
171 | - } | ||
172 | - | ||
173 | - /** | ||
174 | - * Cache entry remove hook. | ||
175 | - * | ||
176 | - * @param key new key | ||
177 | - * @param val old value | ||
178 | - */ | ||
179 | - protected void onRemove(K key, V val) { | ||
180 | - } | ||
181 | - } | ||
182 | - | ||
183 | - /** | ||
184 | - * Distributed object remote event entry listener. | ||
185 | - * | ||
186 | - * @param <K> Entry key type after deserialization | ||
187 | - * @param <V> Entry value type after deserialization | ||
188 | - */ | ||
189 | - public class RemoteEventHandler<K, V> extends EntryAdapter<byte[], byte[]> { | ||
190 | - | ||
191 | - private final Member localMember; | ||
192 | - | ||
193 | - public RemoteEventHandler() { | ||
194 | - this.localMember = theInstance.getCluster().getLocalMember(); | ||
195 | - } | ||
196 | - @Override | ||
197 | - public void entryAdded(EntryEvent<byte[], byte[]> event) { | ||
198 | - if (localMember.equals(event.getMember())) { | ||
199 | - // ignore locally triggered event | ||
200 | - return; | ||
201 | - } | ||
202 | - K key = deserialize(event.getKey()); | ||
203 | - V newVal = deserialize(event.getValue()); | ||
204 | - onAdd(key, newVal); | ||
205 | - } | ||
206 | - | ||
207 | - @Override | ||
208 | - public void entryRemoved(EntryEvent<byte[], byte[]> event) { | ||
209 | - if (localMember.equals(event.getMember())) { | ||
210 | - // ignore locally triggered event | ||
211 | - return; | ||
212 | - } | ||
213 | - K key = deserialize(event.getKey()); | ||
214 | - V val = deserialize(event.getValue()); | ||
215 | - onRemove(key, val); | ||
216 | - } | ||
217 | - | ||
218 | - @Override | ||
219 | - public void entryUpdated(EntryEvent<byte[], byte[]> event) { | ||
220 | - if (localMember.equals(event.getMember())) { | ||
221 | - // ignore locally triggered event | ||
222 | - return; | ||
223 | - } | ||
224 | - K key = deserialize(event.getKey()); | ||
225 | - V oldVal = deserialize(event.getOldValue()); | ||
226 | - V newVal = deserialize(event.getValue()); | ||
227 | - onUpdate(key, oldVal, newVal); | ||
228 | - } | ||
229 | - | ||
230 | - /** | ||
231 | - * Remote entry addition hook. | ||
232 | - * | ||
233 | - * @param key new key | ||
234 | - * @param newVal new value | ||
235 | - */ | ||
236 | - protected void onAdd(K key, V newVal) { | ||
237 | - } | ||
238 | - | ||
239 | - /** | ||
240 | - * Remote entry update hook. | ||
241 | - * | ||
242 | - * @param key new key | ||
243 | - * @param oldValue old value | ||
244 | - * @param newVal new value | ||
245 | - */ | ||
246 | - protected void onUpdate(K key, V oldValue, V newVal) { | ||
247 | - } | ||
248 | - | ||
249 | - /** | ||
250 | - * Remote entry remove hook. | ||
251 | - * | ||
252 | - * @param key new key | ||
253 | - * @param val old value | ||
254 | - */ | ||
255 | - protected void onRemove(K key, V val) { | ||
256 | - } | ||
257 | - } | ||
258 | - | ||
259 | -} |
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.hz; | ||
17 | - | ||
18 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
19 | - | ||
20 | -import org.onosproject.store.serializers.StoreSerializer; | ||
21 | - | ||
22 | -import com.google.common.base.Optional; | ||
23 | -import com.google.common.cache.CacheLoader; | ||
24 | -import com.hazelcast.core.IMap; | ||
25 | - | ||
26 | -/** | ||
27 | - * CacheLoader to wrap Map value with Optional, | ||
28 | - * to handle negative hit on underlying IMap. | ||
29 | - * | ||
30 | - * @param <K> IMap key type after deserialization | ||
31 | - * @param <V> IMap value type after deserialization | ||
32 | - */ | ||
33 | -public final class OptionalCacheLoader<K, V> extends | ||
34 | - CacheLoader<K, Optional<V>> { | ||
35 | - | ||
36 | - private final StoreSerializer serializer; | ||
37 | - private IMap<byte[], byte[]> rawMap; | ||
38 | - | ||
39 | - /** | ||
40 | - * Constructor. | ||
41 | - * | ||
42 | - * @param serializer to use for serialization | ||
43 | - * @param rawMap underlying IMap | ||
44 | - */ | ||
45 | - public OptionalCacheLoader(StoreSerializer serializer, IMap<byte[], byte[]> rawMap) { | ||
46 | - this.serializer = checkNotNull(serializer); | ||
47 | - this.rawMap = checkNotNull(rawMap); | ||
48 | - } | ||
49 | - | ||
50 | - @Override | ||
51 | - public Optional<V> load(K key) throws Exception { | ||
52 | - byte[] keyBytes = serializer.encode(key); | ||
53 | - byte[] valBytes = rawMap.get(keyBytes); | ||
54 | - if (valBytes == null) { | ||
55 | - return Optional.absent(); | ||
56 | - } | ||
57 | - V dev = serializer.decode(valBytes); | ||
58 | - return Optional.of(dev); | ||
59 | - } | ||
60 | -} |
This diff is collapsed. Click to expand it.
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.hz; | ||
17 | - | ||
18 | -import com.google.common.base.Function; | ||
19 | -import com.google.common.collect.FluentIterable; | ||
20 | -import com.hazelcast.core.IQueue; | ||
21 | -import com.hazelcast.core.ItemEvent; | ||
22 | -import com.hazelcast.core.ItemListener; | ||
23 | -import com.hazelcast.monitor.LocalQueueStats; | ||
24 | - | ||
25 | -import org.onosproject.store.serializers.StoreSerializer; | ||
26 | - | ||
27 | -import java.util.Collection; | ||
28 | -import java.util.Iterator; | ||
29 | -import java.util.concurrent.TimeUnit; | ||
30 | - | ||
31 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
32 | - | ||
33 | -/** | ||
34 | - * Wrapper around IQueue<byte[]> which serializes/deserializes | ||
35 | - * key and value using StoreSerializer. | ||
36 | - * | ||
37 | - * @param <T> type | ||
38 | - */ | ||
39 | -public class SQueue<T> implements IQueue<T> { | ||
40 | - | ||
41 | - private final IQueue<byte[]> q; | ||
42 | - private final StoreSerializer serializer; | ||
43 | - | ||
44 | - /** | ||
45 | - * Creates a SQueue instance. | ||
46 | - * | ||
47 | - * @param baseQueue base IQueue to use | ||
48 | - * @param serializer serializer to use for both key and value | ||
49 | - */ | ||
50 | - public SQueue(IQueue<byte[]> baseQueue, StoreSerializer serializer) { | ||
51 | - this.q = checkNotNull(baseQueue); | ||
52 | - this.serializer = checkNotNull(serializer); | ||
53 | - } | ||
54 | - | ||
55 | - private byte[] serialize(Object key) { | ||
56 | - return serializer.encode(key); | ||
57 | - } | ||
58 | - | ||
59 | - private T deserialize(byte[] key) { | ||
60 | - return serializer.decode(key); | ||
61 | - } | ||
62 | - | ||
63 | - @Override | ||
64 | - public boolean add(T t) { | ||
65 | - return q.add(serialize(t)); | ||
66 | - } | ||
67 | - | ||
68 | - @Override | ||
69 | - public boolean offer(T t) { | ||
70 | - return q.offer(serialize(t)); | ||
71 | - } | ||
72 | - | ||
73 | - @Override | ||
74 | - public void put(T t) throws InterruptedException { | ||
75 | - q.put(serialize(t)); | ||
76 | - } | ||
77 | - | ||
78 | - @Override | ||
79 | - public boolean offer(T t, long l, TimeUnit timeUnit) throws InterruptedException { | ||
80 | - return q.offer(serialize(t), l, timeUnit); | ||
81 | - } | ||
82 | - | ||
83 | - @Override | ||
84 | - public T take() throws InterruptedException { | ||
85 | - return deserialize(q.take()); | ||
86 | - } | ||
87 | - | ||
88 | - @Override | ||
89 | - public T poll(long l, TimeUnit timeUnit) throws InterruptedException { | ||
90 | - return deserialize(q.poll(l, timeUnit)); | ||
91 | - } | ||
92 | - | ||
93 | - @Override | ||
94 | - public int remainingCapacity() { | ||
95 | - return q.remainingCapacity(); | ||
96 | - } | ||
97 | - | ||
98 | - @Override | ||
99 | - public boolean remove(Object o) { | ||
100 | - return q.remove(serialize(o)); | ||
101 | - } | ||
102 | - | ||
103 | - @Override | ||
104 | - public boolean contains(Object o) { | ||
105 | - return q.contains(serialize(o)); | ||
106 | - } | ||
107 | - | ||
108 | - @Deprecated // not implemented yet | ||
109 | - @Override | ||
110 | - public int drainTo(Collection<? super T> collection) { | ||
111 | - throw new UnsupportedOperationException(); | ||
112 | - } | ||
113 | - | ||
114 | - @Deprecated // not implemented yet | ||
115 | - @Override | ||
116 | - public int drainTo(Collection<? super T> collection, int i) { | ||
117 | - throw new UnsupportedOperationException(); | ||
118 | - } | ||
119 | - | ||
120 | - @Override | ||
121 | - public T remove() { | ||
122 | - return deserialize(q.remove()); | ||
123 | - } | ||
124 | - | ||
125 | - @Override | ||
126 | - public T poll() { | ||
127 | - return deserialize(q.poll()); | ||
128 | - } | ||
129 | - | ||
130 | - @Override | ||
131 | - public T element() { | ||
132 | - return deserialize(q.element()); | ||
133 | - } | ||
134 | - | ||
135 | - @Override | ||
136 | - public T peek() { | ||
137 | - return deserialize(q.peek()); | ||
138 | - } | ||
139 | - | ||
140 | - @Override | ||
141 | - public int size() { | ||
142 | - return q.size(); | ||
143 | - } | ||
144 | - | ||
145 | - @Override | ||
146 | - public boolean isEmpty() { | ||
147 | - return q.isEmpty(); | ||
148 | - } | ||
149 | - | ||
150 | - @Override | ||
151 | - public Iterator<T> iterator() { | ||
152 | - return FluentIterable.from(q) | ||
153 | - .transform(new DeserializeVal()) | ||
154 | - .iterator(); | ||
155 | - } | ||
156 | - | ||
157 | - @Deprecated // not implemented yet | ||
158 | - @Override | ||
159 | - public Object[] toArray() { | ||
160 | - throw new UnsupportedOperationException(); | ||
161 | - } | ||
162 | - | ||
163 | - @Deprecated // not implemented yet | ||
164 | - @Override | ||
165 | - public <T1> T1[] toArray(T1[] t1s) { | ||
166 | - throw new UnsupportedOperationException(); | ||
167 | - } | ||
168 | - | ||
169 | - @Deprecated // not implemented yet | ||
170 | - @Override | ||
171 | - public boolean containsAll(Collection<?> collection) { | ||
172 | - throw new UnsupportedOperationException(); | ||
173 | - } | ||
174 | - | ||
175 | - @Deprecated // not implemented yet | ||
176 | - @Override | ||
177 | - public boolean addAll(Collection<? extends T> collection) { | ||
178 | - throw new UnsupportedOperationException(); | ||
179 | - } | ||
180 | - | ||
181 | - @Deprecated // not implemented yet | ||
182 | - @Override | ||
183 | - public boolean removeAll(Collection<?> collection) { | ||
184 | - throw new UnsupportedOperationException(); | ||
185 | - } | ||
186 | - | ||
187 | - @Deprecated // not implemented yet | ||
188 | - @Override | ||
189 | - public boolean retainAll(Collection<?> collection) { | ||
190 | - throw new UnsupportedOperationException(); | ||
191 | - } | ||
192 | - | ||
193 | - @Override | ||
194 | - public void clear() { | ||
195 | - q.clear(); | ||
196 | - } | ||
197 | - | ||
198 | - @Override | ||
199 | - public LocalQueueStats getLocalQueueStats() { | ||
200 | - return q.getLocalQueueStats(); | ||
201 | - } | ||
202 | - | ||
203 | - | ||
204 | - @Override | ||
205 | - public String addItemListener(ItemListener<T> itemListener, boolean withValue) { | ||
206 | - ItemListener<byte[]> il = new ItemListener<byte[]>() { | ||
207 | - @Override | ||
208 | - public void itemAdded(ItemEvent<byte[]> item) { | ||
209 | - itemListener.itemAdded(new ItemEvent<T>(getName(item), | ||
210 | - item.getEventType(), | ||
211 | - deserialize(item.getItem()), | ||
212 | - item.getMember())); | ||
213 | - } | ||
214 | - | ||
215 | - @Override | ||
216 | - public void itemRemoved(ItemEvent<byte[]> item) { | ||
217 | - itemListener.itemRemoved(new ItemEvent<T>(getName(item), | ||
218 | - item.getEventType(), | ||
219 | - deserialize(item.getItem()), | ||
220 | - item.getMember())); | ||
221 | - } | ||
222 | - | ||
223 | - private String getName(ItemEvent<byte[]> item) { | ||
224 | - return (item.getSource() instanceof String) ? | ||
225 | - (String) item.getSource() : item.getSource().toString(); | ||
226 | - | ||
227 | - } | ||
228 | - }; | ||
229 | - return q.addItemListener(il, withValue); | ||
230 | - } | ||
231 | - | ||
232 | - | ||
233 | - @Override | ||
234 | - public boolean removeItemListener(String registrationId) { | ||
235 | - return q.removeItemListener(registrationId); | ||
236 | - } | ||
237 | - | ||
238 | - @Deprecated | ||
239 | - @Override | ||
240 | - public Object getId() { | ||
241 | - return q.getId(); | ||
242 | - } | ||
243 | - | ||
244 | - @Override | ||
245 | - public String getPartitionKey() { | ||
246 | - return q.getPartitionKey(); | ||
247 | - } | ||
248 | - | ||
249 | - @Override | ||
250 | - public String getName() { | ||
251 | - return q.getName(); | ||
252 | - } | ||
253 | - | ||
254 | - @Override | ||
255 | - public String getServiceName() { | ||
256 | - return q.getServiceName(); | ||
257 | - } | ||
258 | - | ||
259 | - @Override | ||
260 | - public void destroy() { | ||
261 | - q.destroy(); | ||
262 | - } | ||
263 | - | ||
264 | - private final class DeserializeVal implements Function<byte[], T> { | ||
265 | - @Override | ||
266 | - public T apply(byte[] input) { | ||
267 | - return deserialize(input); | ||
268 | - } | ||
269 | - } | ||
270 | -} |
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.hz; | ||
17 | - | ||
18 | -import static com.google.common.base.Preconditions.checkNotNull; | ||
19 | - | ||
20 | -import java.util.ArrayList; | ||
21 | -import java.util.Collection; | ||
22 | -import java.util.HashSet; | ||
23 | -import java.util.Set; | ||
24 | -import java.util.concurrent.TimeUnit; | ||
25 | - | ||
26 | -import org.onosproject.store.serializers.StoreSerializer; | ||
27 | - | ||
28 | -import com.hazelcast.core.TransactionalMap; | ||
29 | -import com.hazelcast.query.Predicate; | ||
30 | - | ||
31 | -/** | ||
32 | - * Wrapper around TransactionalMap<byte[], byte[]> which serializes/deserializes | ||
33 | - * key and value using StoreSerializer. | ||
34 | - * | ||
35 | - * @param <K> key type | ||
36 | - * @param <V> value type | ||
37 | - */ | ||
38 | -public class STxMap<K, V> implements TransactionalMap<K, V> { | ||
39 | - | ||
40 | - private final TransactionalMap<byte[], byte[]> m; | ||
41 | - private final StoreSerializer serializer; | ||
42 | - | ||
43 | - /** | ||
44 | - * Creates a STxMap instance. | ||
45 | - * | ||
46 | - * @param baseMap base IMap to use | ||
47 | - * @param serializer serializer to use for both key and value | ||
48 | - */ | ||
49 | - public STxMap(TransactionalMap<byte[], byte[]> baseMap, StoreSerializer serializer) { | ||
50 | - this.m = checkNotNull(baseMap); | ||
51 | - this.serializer = checkNotNull(serializer); | ||
52 | - } | ||
53 | - | ||
54 | - @Override | ||
55 | - public int size() { | ||
56 | - return m.size(); | ||
57 | - } | ||
58 | - | ||
59 | - @Override | ||
60 | - public boolean isEmpty() { | ||
61 | - return m.isEmpty(); | ||
62 | - } | ||
63 | - | ||
64 | - @Deprecated | ||
65 | - @Override | ||
66 | - public Object getId() { | ||
67 | - return m.getId(); | ||
68 | - } | ||
69 | - | ||
70 | - @Override | ||
71 | - public String getPartitionKey() { | ||
72 | - return m.getPartitionKey(); | ||
73 | - } | ||
74 | - | ||
75 | - @Override | ||
76 | - public String getName() { | ||
77 | - return m.getName(); | ||
78 | - } | ||
79 | - | ||
80 | - @Override | ||
81 | - public String getServiceName() { | ||
82 | - return m.getServiceName(); | ||
83 | - } | ||
84 | - | ||
85 | - @Override | ||
86 | - public void destroy() { | ||
87 | - m.destroy(); | ||
88 | - } | ||
89 | - | ||
90 | - @Override | ||
91 | - public boolean containsKey(Object key) { | ||
92 | - return m.containsKey(serializeKey(key)); | ||
93 | - } | ||
94 | - | ||
95 | - @Override | ||
96 | - public V get(Object key) { | ||
97 | - return deserializeVal(m.get(serializeKey(key))); | ||
98 | - } | ||
99 | - | ||
100 | - @Override | ||
101 | - public V getForUpdate(Object key) { | ||
102 | - return deserializeVal(m.getForUpdate(serializeKey(key))); | ||
103 | - } | ||
104 | - | ||
105 | - @Override | ||
106 | - public V put(K key, V value) { | ||
107 | - return deserializeVal(m.put(serializeKey(key), serializeVal(value))); | ||
108 | - } | ||
109 | - | ||
110 | - @Override | ||
111 | - public V remove(Object key) { | ||
112 | - return deserializeVal(m.remove(serializeKey(key))); | ||
113 | - } | ||
114 | - | ||
115 | - @Override | ||
116 | - public boolean remove(Object key, Object value) { | ||
117 | - return m.remove(serializeKey(key), serializeVal(value)); | ||
118 | - } | ||
119 | - | ||
120 | - @Override | ||
121 | - public void delete(Object key) { | ||
122 | - m.delete(serializeKey(key)); | ||
123 | - } | ||
124 | - | ||
125 | - @Override | ||
126 | - public V put(K key, V value, long ttl, TimeUnit timeunit) { | ||
127 | - return deserializeVal(m.put(serializeKey(key), serializeVal(value), ttl, timeunit)); | ||
128 | - } | ||
129 | - | ||
130 | - @Override | ||
131 | - public V putIfAbsent(K key, V value) { | ||
132 | - return deserializeVal(m.putIfAbsent(serializeKey(key), serializeVal(value))); | ||
133 | - } | ||
134 | - | ||
135 | - @Override | ||
136 | - public boolean replace(K key, V oldValue, V newValue) { | ||
137 | - return m.replace(serializeKey(key), serializeVal(oldValue), serializeVal(newValue)); | ||
138 | - } | ||
139 | - | ||
140 | - @Override | ||
141 | - public V replace(K key, V value) { | ||
142 | - return deserializeVal(m.replace(serializeKey(key), serializeVal(value))); | ||
143 | - } | ||
144 | - | ||
145 | - @Override | ||
146 | - public void set(K key, V value) { | ||
147 | - m.set(serializeKey(key), serializeVal(value)); | ||
148 | - } | ||
149 | - | ||
150 | - | ||
151 | - @Override | ||
152 | - public Set<K> keySet() { | ||
153 | - return deserializeKeySet(m.keySet()); | ||
154 | - } | ||
155 | - | ||
156 | - @Override | ||
157 | - public Collection<V> values() { | ||
158 | - return deserializeVals(m.values()); | ||
159 | - } | ||
160 | - | ||
161 | - @Deprecated // marking method not implemented | ||
162 | - @SuppressWarnings("rawtypes") | ||
163 | - @Override | ||
164 | - public Set<K> keySet(Predicate predicate) { | ||
165 | - throw new UnsupportedOperationException(); | ||
166 | - } | ||
167 | - | ||
168 | - @Deprecated // marking method not implemented | ||
169 | - @SuppressWarnings("rawtypes") | ||
170 | - @Override | ||
171 | - public Collection<V> values(Predicate predicate) { | ||
172 | - throw new UnsupportedOperationException(); | ||
173 | - } | ||
174 | - | ||
175 | - private byte[] serializeKey(Object key) { | ||
176 | - return serializer.encode(key); | ||
177 | - } | ||
178 | - | ||
179 | - private K deserializeKey(byte[] key) { | ||
180 | - return serializer.decode(key); | ||
181 | - } | ||
182 | - | ||
183 | - private byte[] serializeVal(Object val) { | ||
184 | - return serializer.encode(val); | ||
185 | - } | ||
186 | - | ||
187 | - private V deserializeVal(byte[] val) { | ||
188 | - if (val == null) { | ||
189 | - return null; | ||
190 | - } | ||
191 | - return serializer.decode(val.clone()); | ||
192 | - } | ||
193 | - | ||
194 | - private Set<K> deserializeKeySet(Set<byte[]> keys) { | ||
195 | - Set<K> dsk = new HashSet<>(keys.size()); | ||
196 | - for (byte[] key : keys) { | ||
197 | - dsk.add(deserializeKey(key)); | ||
198 | - } | ||
199 | - return dsk; | ||
200 | - } | ||
201 | - | ||
202 | - private Collection<V> deserializeVals(Collection<byte[]> vals) { | ||
203 | - Collection<V> dsl = new ArrayList<>(vals.size()); | ||
204 | - for (byte[] val : vals) { | ||
205 | - dsl.add(deserializeVal(val)); | ||
206 | - } | ||
207 | - return dsl; | ||
208 | - } | ||
209 | -} |
1 | -/* | ||
2 | - * Copyright 2014-2015 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.hz; | ||
17 | - | ||
18 | -import com.google.common.io.ByteStreams; | ||
19 | -import com.google.common.io.Files; | ||
20 | -import com.hazelcast.config.Config; | ||
21 | -import com.hazelcast.config.FileSystemXmlConfig; | ||
22 | -import com.hazelcast.core.Hazelcast; | ||
23 | -import com.hazelcast.core.HazelcastInstance; | ||
24 | - | ||
25 | -import org.apache.felix.scr.annotations.Activate; | ||
26 | -import org.apache.felix.scr.annotations.Component; | ||
27 | -import org.apache.felix.scr.annotations.Deactivate; | ||
28 | -import org.apache.felix.scr.annotations.Service; | ||
29 | -import org.onosproject.store.cluster.impl.ClusterDefinitionManager; | ||
30 | -import org.slf4j.Logger; | ||
31 | -import org.slf4j.LoggerFactory; | ||
32 | - | ||
33 | -import java.io.File; | ||
34 | -import java.io.FileNotFoundException; | ||
35 | -import java.io.IOException; | ||
36 | -import java.io.InputStream; | ||
37 | - | ||
38 | -/** | ||
39 | - * Auxiliary bootstrap of distributed store. | ||
40 | - */ | ||
41 | -@Component(immediate = false, enabled = false) | ||
42 | -@Service | ||
43 | -public class StoreManager implements StoreService { | ||
44 | - | ||
45 | - protected static final String HAZELCAST_XML_FILE = "etc/hazelcast.xml"; | ||
46 | - | ||
47 | - private final Logger log = LoggerFactory.getLogger(getClass()); | ||
48 | - | ||
49 | - protected HazelcastInstance instance; | ||
50 | - | ||
51 | - @Activate | ||
52 | - public void activate() { | ||
53 | - try { | ||
54 | - File hazelcastFile = new File(HAZELCAST_XML_FILE); | ||
55 | - if (!hazelcastFile.exists()) { | ||
56 | - createDefaultHazelcastFile(hazelcastFile); | ||
57 | - } | ||
58 | - | ||
59 | - Config config = new FileSystemXmlConfig(HAZELCAST_XML_FILE); | ||
60 | - | ||
61 | - instance = Hazelcast.newHazelcastInstance(config); | ||
62 | - log.info("Started"); | ||
63 | - } catch (FileNotFoundException e) { | ||
64 | - log.error("Unable to configure Hazelcast", e); | ||
65 | - } | ||
66 | - } | ||
67 | - | ||
68 | - private void createDefaultHazelcastFile(File hazelcastFile) { | ||
69 | - String ip = ClusterDefinitionManager.getSiteLocalAddress(); | ||
70 | - String ipPrefix = ip.replaceFirst("\\.[0-9]*$", ".*"); | ||
71 | - InputStream his = getClass().getResourceAsStream("/hazelcast.xml"); | ||
72 | - try { | ||
73 | - String hzCfg = new String(ByteStreams.toByteArray(his), "UTF-8"); | ||
74 | - hzCfg = hzCfg.replaceFirst("@NAME", ip); | ||
75 | - hzCfg = hzCfg.replaceFirst("@PREFIX", ipPrefix); | ||
76 | - Files.write(hzCfg.getBytes("UTF-8"), hazelcastFile); | ||
77 | - } catch (IOException e) { | ||
78 | - log.error("Unable to write default hazelcast file", e); | ||
79 | - } | ||
80 | - } | ||
81 | - | ||
82 | - @Deactivate | ||
83 | - public void deactivate() { | ||
84 | - instance.shutdown(); | ||
85 | - log.info("Stopped"); | ||
86 | - } | ||
87 | - | ||
88 | - @Override | ||
89 | - public HazelcastInstance getHazelcastInstance() { | ||
90 | - return instance; | ||
91 | - } | ||
92 | - | ||
93 | -} |
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.hz; | ||
17 | - | ||
18 | -import com.hazelcast.core.HazelcastInstance; | ||
19 | - | ||
20 | -/** | ||
21 | - * Bootstrap service to get a handle on a share Hazelcast instance. | ||
22 | - */ | ||
23 | -public interface StoreService { | ||
24 | - | ||
25 | - /** | ||
26 | - * Returns the shared Hazelcast instance for use as a distributed store | ||
27 | - * backing. | ||
28 | - * | ||
29 | - * @return shared Hazelcast instance | ||
30 | - */ | ||
31 | - HazelcastInstance getHazelcastInstance(); | ||
32 | - | ||
33 | -} |
1 | -/* | ||
2 | - * Copyright 2014 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | - | ||
17 | -/** | ||
18 | - * Common abstractions and facilities for implementing distributed store | ||
19 | - * using Hazelcast. | ||
20 | - */ | ||
21 | -package org.onosproject.store.hz; |
This diff is collapsed. Click to expand it.
... | @@ -27,7 +27,7 @@ import com.esotericsoftware.kryo.io.Input; | ... | @@ -27,7 +27,7 @@ import com.esotericsoftware.kryo.io.Input; |
27 | import com.esotericsoftware.kryo.io.Output; | 27 | import com.esotericsoftware.kryo.io.Output; |
28 | 28 | ||
29 | /** | 29 | /** |
30 | - * Serializer for RoleValues used by {@link DistributedMastershipStore}. | 30 | + * Serializer for RoleValues used by {@link org.onosproject.mastership.MastershipStore}. |
31 | */ | 31 | */ |
32 | public class RoleValueSerializer extends Serializer<RoleValue> { | 32 | public class RoleValueSerializer extends Serializer<RoleValue> { |
33 | 33 | ... | ... |
This diff is collapsed. Click to expand it.
1 | -<?xml version="1.0" encoding="UTF-8"?> | ||
2 | - | ||
3 | -<!-- | ||
4 | - ~ Copyright 2015 Open Networking Laboratory | ||
5 | - ~ | ||
6 | - ~ Licensed under the Apache License, Version 2.0 (the "License"); | ||
7 | - ~ you may not use this file except in compliance with the License. | ||
8 | - ~ You may obtain a copy of the License at | ||
9 | - ~ | ||
10 | - ~ http://www.apache.org/licenses/LICENSE-2.0 | ||
11 | - ~ | ||
12 | - ~ Unless required by applicable law or agreed to in writing, software | ||
13 | - ~ distributed under the License is distributed on an "AS IS" BASIS, | ||
14 | - ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
15 | - ~ See the License for the specific language governing permissions and | ||
16 | - ~ limitations under the License. | ||
17 | - --> | ||
18 | - | ||
19 | -<!-- | ||
20 | - The default Hazelcast configuration. This is used when: | ||
21 | - | ||
22 | - - no hazelcast.xml if present | ||
23 | - | ||
24 | ---> | ||
25 | -<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.3.xsd" | ||
26 | - xmlns="http://www.hazelcast.com/schema/config" | ||
27 | - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> | ||
28 | - <group> | ||
29 | - <name>@NAME</name> | ||
30 | - <password>rocks</password> | ||
31 | - </group> | ||
32 | - <management-center enabled="false">http://localhost:8080/mancenter</management-center> | ||
33 | - <properties> | ||
34 | - <property name="hazelcast.max.no.heartbeat.seconds">30</property> | ||
35 | - <property name="hazelcast.merge.first.run.delay.seconds">30</property> | ||
36 | - <property name="hazelcast.merge.next.run.delay.seconds">30</property> | ||
37 | - </properties> | ||
38 | - <network> | ||
39 | - <port auto-increment="true" port-count="100">5701</port> | ||
40 | - <outbound-ports> | ||
41 | - <!-- | ||
42 | - Allowed port range when connecting to other nodes. | ||
43 | - 0 or * means use system provided port. | ||
44 | - --> | ||
45 | - <ports>0</ports> | ||
46 | - </outbound-ports> | ||
47 | - <join> | ||
48 | - <multicast enabled="true"> | ||
49 | - <multicast-group>224.2.2.3</multicast-group> | ||
50 | - <multicast-port>54327</multicast-port> | ||
51 | - </multicast> | ||
52 | - <tcp-ip enabled="false"> | ||
53 | - <interface>127.0.0.1</interface> | ||
54 | - </tcp-ip> | ||
55 | - </join> | ||
56 | - <interfaces enabled="true"> | ||
57 | - <interface>@PREFIX</interface> | ||
58 | - </interfaces> | ||
59 | - <ssl enabled="false"/> | ||
60 | - <socket-interceptor enabled="false"/> | ||
61 | - <symmetric-encryption enabled="false"> | ||
62 | - <!-- | ||
63 | - encryption algorithm such as | ||
64 | - DES/ECB/PKCS5Padding, | ||
65 | - PBEWithMD5AndDES, | ||
66 | - AES/CBC/PKCS5Padding, | ||
67 | - Blowfish, | ||
68 | - DESede | ||
69 | - --> | ||
70 | - <algorithm>PBEWithMD5AndDES</algorithm> | ||
71 | - <!-- salt value to use when generating the secret key --> | ||
72 | - <salt>thesalt</salt> | ||
73 | - <!-- pass phrase to use when generating the secret key --> | ||
74 | - <password>thepass</password> | ||
75 | - <!-- iteration count to use when generating the secret key --> | ||
76 | - <iteration-count>19</iteration-count> | ||
77 | - </symmetric-encryption> | ||
78 | - </network> | ||
79 | - <partition-group enabled="false"/> | ||
80 | - <executor-service name="default"> | ||
81 | - <pool-size>16</pool-size> | ||
82 | - <!--Queue capacity. 0 means Integer.MAX_VALUE.--> | ||
83 | - <queue-capacity>0</queue-capacity> | ||
84 | - </executor-service> | ||
85 | - <queue name="default"> | ||
86 | - <!-- | ||
87 | - Maximum size of the queue. When a JVM's local queue size reaches the maximum, | ||
88 | - all put/offer operations will get blocked until the queue size | ||
89 | - of the JVM goes down below the maximum. | ||
90 | - Any integer between 0 and Integer.MAX_VALUE. 0 means | ||
91 | - Integer.MAX_VALUE. Default is 0. | ||
92 | - --> | ||
93 | - <max-size>0</max-size> | ||
94 | - <!-- | ||
95 | - Number of backups. If 1 is set as the backup-count for example, | ||
96 | - then all entries of the map will be copied to another JVM for | ||
97 | - fail-safety. 0 means no backup. | ||
98 | - --> | ||
99 | - <backup-count>1</backup-count> | ||
100 | - | ||
101 | - <!-- | ||
102 | - Number of async backups. 0 means no backup. | ||
103 | - --> | ||
104 | - <async-backup-count>0</async-backup-count> | ||
105 | - | ||
106 | - <empty-queue-ttl>-1</empty-queue-ttl> | ||
107 | - </queue> | ||
108 | - <map name="default"> | ||
109 | - <!-- | ||
110 | - Data type that will be used for storing recordMap. | ||
111 | - Possible values: | ||
112 | - BINARY (default): keys and values will be stored as binary data | ||
113 | - OBJECT : values will be stored in their object forms | ||
114 | - OFFHEAP : values will be stored in non-heap region of JVM | ||
115 | - --> | ||
116 | - <in-memory-format>BINARY</in-memory-format> | ||
117 | - | ||
118 | - <!-- | ||
119 | - Number of backups. If 1 is set as the backup-count for example, | ||
120 | - then all entries of the map will be copied to another JVM for | ||
121 | - fail-safety. 0 means no backup. | ||
122 | - --> | ||
123 | - <backup-count>1</backup-count> | ||
124 | - <!-- | ||
125 | - Number of async backups. 0 means no backup. | ||
126 | - --> | ||
127 | - <async-backup-count>0</async-backup-count> | ||
128 | - <!-- | ||
129 | - Maximum number of seconds for each entry to stay in the map. Entries that are | ||
130 | - older than <time-to-live-seconds> and not updated for <time-to-live-seconds> | ||
131 | - will get automatically evicted from the map. | ||
132 | - Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0. | ||
133 | - --> | ||
134 | - <time-to-live-seconds>0</time-to-live-seconds> | ||
135 | - <!-- | ||
136 | - Maximum number of seconds for each entry to stay idle in the map. Entries that are | ||
137 | - idle(not touched) for more than <max-idle-seconds> will get | ||
138 | - automatically evicted from the map. Entry is touched if get, put or containsKey is called. | ||
139 | - Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0. | ||
140 | - --> | ||
141 | - <max-idle-seconds>0</max-idle-seconds> | ||
142 | - <!-- | ||
143 | - Valid values are: | ||
144 | - NONE (no eviction), | ||
145 | - LRU (Least Recently Used), | ||
146 | - LFU (Least Frequently Used). | ||
147 | - NONE is the default. | ||
148 | - --> | ||
149 | - <eviction-policy>NONE</eviction-policy> | ||
150 | - <!-- | ||
151 | - Maximum size of the map. When max size is reached, | ||
152 | - map is evicted based on the policy defined. | ||
153 | - Any integer between 0 and Integer.MAX_VALUE. 0 means | ||
154 | - Integer.MAX_VALUE. Default is 0. | ||
155 | - --> | ||
156 | - <max-size policy="PER_NODE">0</max-size> | ||
157 | - <!-- | ||
158 | - When max. size is reached, specified percentage of | ||
159 | - the map will be evicted. Any integer between 0 and 100. | ||
160 | - If 25 is set for example, 25% of the entries will | ||
161 | - get evicted. | ||
162 | - --> | ||
163 | - <eviction-percentage>25</eviction-percentage> | ||
164 | - <!-- | ||
165 | - Minimum time in milliseconds which should pass before checking | ||
166 | - if a partition of this map is evictable or not. | ||
167 | - Default value is 100 millis. | ||
168 | - --> | ||
169 | - <min-eviction-check-millis>100</min-eviction-check-millis> | ||
170 | - <!-- | ||
171 | - While recovering from split-brain (network partitioning), | ||
172 | - map entries in the small cluster will merge into the bigger cluster | ||
173 | - based on the policy set here. When an entry merge into the | ||
174 | - cluster, there might an existing entry with the same key already. | ||
175 | - Values of these entries might be different for that same key. | ||
176 | - Which value should be set for the key? Conflict is resolved by | ||
177 | - the policy set here. Default policy is PutIfAbsentMapMergePolicy | ||
178 | - | ||
179 | - There are built-in merge policies such as | ||
180 | - com.hazelcast.map.merge.PassThroughMergePolicy; entry will be added if there is no existing entry for the key. | ||
181 | - com.hazelcast.map.merge.PutIfAbsentMapMergePolicy ; entry will be added if the merging entry doesn't exist in the cluster. | ||
182 | - com.hazelcast.map.merge.HigherHitsMapMergePolicy ; entry with the higher hits wins. | ||
183 | - com.hazelcast.map.merge.LatestUpdateMapMergePolicy ; entry with the latest update wins. | ||
184 | - --> | ||
185 | - <merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy> | ||
186 | - </map> | ||
187 | - | ||
188 | - <multimap name="default"> | ||
189 | - <backup-count>1</backup-count> | ||
190 | - <value-collection-type>SET</value-collection-type> | ||
191 | - </multimap> | ||
192 | - | ||
193 | - <multimap name="default"> | ||
194 | - <backup-count>1</backup-count> | ||
195 | - <value-collection-type>SET</value-collection-type> | ||
196 | - </multimap> | ||
197 | - | ||
198 | - <list name="default"> | ||
199 | - <backup-count>1</backup-count> | ||
200 | - </list> | ||
201 | - | ||
202 | - <set name="default"> | ||
203 | - <backup-count>1</backup-count> | ||
204 | - </set> | ||
205 | - | ||
206 | - <jobtracker name="default"> | ||
207 | - <max-thread-size>0</max-thread-size> | ||
208 | - <!-- Queue size 0 means number of partitions * 2 --> | ||
209 | - <queue-size>0</queue-size> | ||
210 | - <retry-count>0</retry-count> | ||
211 | - <chunk-size>1000</chunk-size> | ||
212 | - <communicate-stats>true</communicate-stats> | ||
213 | - <topology-changed-strategy>CANCEL_RUNNING_OPERATION</topology-changed-strategy> | ||
214 | - </jobtracker> | ||
215 | - | ||
216 | - <semaphore name="default"> | ||
217 | - <initial-permits>0</initial-permits> | ||
218 | - <backup-count>1</backup-count> | ||
219 | - <async-backup-count>0</async-backup-count> | ||
220 | - </semaphore> | ||
221 | - | ||
222 | - <serialization> | ||
223 | - <portable-version>0</portable-version> | ||
224 | - </serialization> | ||
225 | - | ||
226 | - <services enable-defaults="true"/> | ||
227 | - | ||
228 | -</hazelcast> |
1 | -/* | ||
2 | - * Copyright 2014-2015 Open Networking Laboratory | ||
3 | - * | ||
4 | - * Licensed under the Apache License, Version 2.0 (the "License"); | ||
5 | - * you may not use this file except in compliance with the License. | ||
6 | - * You may obtain a copy of the License at | ||
7 | - * | ||
8 | - * http://www.apache.org/licenses/LICENSE-2.0 | ||
9 | - * | ||
10 | - * Unless required by applicable law or agreed to in writing, software | ||
11 | - * distributed under the License is distributed on an "AS IS" BASIS, | ||
12 | - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
13 | - * See the License for the specific language governing permissions and | ||
14 | - * limitations under the License. | ||
15 | - */ | ||
16 | -package org.onosproject.store.hz; | ||
17 | - | ||
18 | -import static com.google.common.base.Preconditions.checkArgument; | ||
19 | -import static com.google.common.base.Preconditions.checkState; | ||
20 | - | ||
21 | -import java.io.FileNotFoundException; | ||
22 | -import java.util.UUID; | ||
23 | - | ||
24 | -import com.hazelcast.config.Config; | ||
25 | -import com.hazelcast.config.FileSystemXmlConfig; | ||
26 | -import com.hazelcast.core.HazelcastInstance; | ||
27 | -import com.hazelcast.test.TestHazelcastInstanceFactory; | ||
28 | - | ||
29 | -/** | ||
30 | - * Dummy StoreManager to use specified Hazelcast instance. | ||
31 | - */ | ||
32 | -public class TestStoreManager extends StoreManager { | ||
33 | - | ||
34 | - private TestHazelcastInstanceFactory factory; | ||
35 | - | ||
36 | - /** | ||
37 | - * Gets the Hazelcast Config for testing. | ||
38 | - * | ||
39 | - * @return Hazelcast Configuration for testing | ||
40 | - */ | ||
41 | - public static Config getTestConfig() { | ||
42 | - Config config; | ||
43 | - try { | ||
44 | - config = new FileSystemXmlConfig(HAZELCAST_XML_FILE); | ||
45 | - } catch (FileNotFoundException e) { | ||
46 | - // falling back to default | ||
47 | - config = new Config(); | ||
48 | - } | ||
49 | - // avoid accidentally joining other cluster | ||
50 | - config.getGroupConfig().setName(UUID.randomUUID().toString()); | ||
51 | - // quickly form single node cluster | ||
52 | - config.getNetworkConfig().getJoin() | ||
53 | - .getTcpIpConfig() | ||
54 | - .setEnabled(true).setConnectionTimeoutSeconds(0); | ||
55 | - config.getNetworkConfig().getJoin() | ||
56 | - .getMulticastConfig() | ||
57 | - .setEnabled(false); | ||
58 | - return config; | ||
59 | - } | ||
60 | - | ||
61 | - /** | ||
62 | - * Creates an instance of dummy Hazelcast instance for testing. | ||
63 | - * | ||
64 | - * @return HazelcastInstance | ||
65 | - */ | ||
66 | - public HazelcastInstance initSingleInstance() { | ||
67 | - return initInstances(1)[0]; | ||
68 | - } | ||
69 | - | ||
70 | - /** | ||
71 | - * Creates some instances of dummy Hazelcast instances for testing. | ||
72 | - * | ||
73 | - * @param count number of instances to create | ||
74 | - * @return array of HazelcastInstances | ||
75 | - */ | ||
76 | - public HazelcastInstance[] initInstances(int count) { | ||
77 | - checkArgument(count > 0, "Cluster size must be > 0"); | ||
78 | - factory = new TestHazelcastInstanceFactory(count); | ||
79 | - return factory.newInstances(getTestConfig()); | ||
80 | - } | ||
81 | - | ||
82 | - /** | ||
83 | - * Sets the Hazelast instance to return on #getHazelcastInstance(). | ||
84 | - * | ||
85 | - * @param instance Hazelast instance to return on #getHazelcastInstance() | ||
86 | - */ | ||
87 | - public void setHazelcastInstance(HazelcastInstance instance) { | ||
88 | - this.instance = instance; | ||
89 | - } | ||
90 | - | ||
91 | - @Override | ||
92 | - public void activate() { | ||
93 | - // Hazelcast setup removed from original code. | ||
94 | - checkState(this.instance != null, "HazelcastInstance needs to be set"); | ||
95 | - } | ||
96 | - | ||
97 | - @Override | ||
98 | - public void deactivate() { | ||
99 | - // Hazelcast instance shutdown removed from original code. | ||
100 | - factory.shutdownAll(); | ||
101 | - } | ||
102 | -} |
... | @@ -15,49 +15,11 @@ | ... | @@ -15,49 +15,11 @@ |
15 | */ | 15 | */ |
16 | package org.onosproject.store.mastership.impl; | 16 | package org.onosproject.store.mastership.impl; |
17 | 17 | ||
18 | -import java.util.Map; | ||
19 | -import java.util.Set; | ||
20 | -import java.util.concurrent.CountDownLatch; | ||
21 | -import java.util.concurrent.TimeUnit; | ||
22 | - | ||
23 | -import org.junit.After; | ||
24 | -import org.junit.AfterClass; | ||
25 | -import org.junit.Before; | ||
26 | -import org.junit.BeforeClass; | ||
27 | -import org.junit.Ignore; | ||
28 | -import org.junit.Test; | ||
29 | -import org.onlab.junit.TestTools; | ||
30 | -import org.onlab.packet.IpAddress; | ||
31 | -import org.onosproject.cluster.ClusterServiceAdapter; | ||
32 | -import org.onosproject.cluster.ControllerNode; | ||
33 | -import org.onosproject.cluster.DefaultControllerNode; | ||
34 | -import org.onosproject.cluster.NodeId; | ||
35 | -import org.onosproject.mastership.MastershipEvent; | ||
36 | -import org.onosproject.mastership.MastershipEvent.Type; | ||
37 | -import org.onosproject.mastership.MastershipStoreDelegate; | ||
38 | -import org.onosproject.mastership.MastershipTerm; | ||
39 | -import org.onosproject.net.DeviceId; | ||
40 | -import org.onosproject.net.MastershipRole; | ||
41 | -import org.onosproject.store.hz.StoreManager; | ||
42 | -import org.onosproject.store.hz.StoreService; | ||
43 | -import org.onosproject.store.hz.TestStoreManager; | ||
44 | -import org.onosproject.store.serializers.KryoSerializer; | ||
45 | - | ||
46 | -import com.google.common.collect.Sets; | ||
47 | -import com.google.common.util.concurrent.Futures; | ||
48 | - | ||
49 | -import static org.junit.Assert.assertEquals; | ||
50 | -import static org.junit.Assert.assertNull; | ||
51 | -import static org.junit.Assert.assertTrue; | ||
52 | -import static org.onosproject.net.MastershipRole.MASTER; | ||
53 | -import static org.onosproject.net.MastershipRole.NONE; | ||
54 | -import static org.onosproject.net.MastershipRole.STANDBY; | ||
55 | - | ||
56 | /** | 18 | /** |
57 | * Test of the Hazelcast-based distributed MastershipStore implementation. | 19 | * Test of the Hazelcast-based distributed MastershipStore implementation. |
58 | */ | 20 | */ |
59 | public class DistributedMastershipStoreTest { | 21 | public class DistributedMastershipStoreTest { |
60 | - | 22 | +/* |
61 | private static final DeviceId DID1 = DeviceId.deviceId("of:01"); | 23 | private static final DeviceId DID1 = DeviceId.deviceId("of:01"); |
62 | private static final DeviceId DID2 = DeviceId.deviceId("of:02"); | 24 | private static final DeviceId DID2 = DeviceId.deviceId("of:02"); |
63 | private static final DeviceId DID3 = DeviceId.deviceId("of:03"); | 25 | private static final DeviceId DID3 = DeviceId.deviceId("of:03"); |
... | @@ -320,5 +282,5 @@ public class DistributedMastershipStoreTest { | ... | @@ -320,5 +282,5 @@ public class DistributedMastershipStoreTest { |
320 | } | 282 | } |
321 | 283 | ||
322 | } | 284 | } |
323 | - | 285 | +*/ |
324 | } | 286 | } | ... | ... |
... | @@ -15,50 +15,11 @@ | ... | @@ -15,50 +15,11 @@ |
15 | */ | 15 | */ |
16 | package org.onosproject.store.resource.impl; | 16 | package org.onosproject.store.resource.impl; |
17 | 17 | ||
18 | -import java.util.HashSet; | ||
19 | -import java.util.Set; | ||
20 | - | ||
21 | -import org.junit.After; | ||
22 | -import org.junit.Before; | ||
23 | -import org.junit.Test; | ||
24 | -import org.onlab.util.Bandwidth; | ||
25 | -import org.onosproject.net.AnnotationKeys; | ||
26 | -import org.onosproject.net.Annotations; | ||
27 | -import org.onosproject.net.ConnectPoint; | ||
28 | -import org.onosproject.net.DefaultAnnotations; | ||
29 | -import org.onosproject.net.DefaultLink; | ||
30 | -import org.onosproject.net.Link; | ||
31 | -import org.onosproject.net.intent.IntentId; | ||
32 | -import org.onosproject.net.provider.ProviderId; | ||
33 | -import org.onosproject.net.resource.BandwidthResource; | ||
34 | -import org.onosproject.net.resource.BandwidthResourceAllocation; | ||
35 | -import org.onosproject.net.resource.DefaultLinkResourceAllocations; | ||
36 | -import org.onosproject.net.resource.DefaultLinkResourceRequest; | ||
37 | -import org.onosproject.net.resource.LambdaResource; | ||
38 | -import org.onosproject.net.resource.LambdaResourceAllocation; | ||
39 | -import org.onosproject.net.resource.LinkResourceAllocations; | ||
40 | -import org.onosproject.net.resource.LinkResourceRequest; | ||
41 | -import org.onosproject.net.resource.LinkResourceStore; | ||
42 | -import org.onosproject.net.resource.ResourceAllocation; | ||
43 | -import org.onosproject.net.resource.ResourceAllocationException; | ||
44 | -import org.onosproject.net.resource.ResourceType; | ||
45 | -import org.onosproject.store.hz.StoreService; | ||
46 | -import org.onosproject.store.hz.TestStoreManager; | ||
47 | - | ||
48 | -import com.google.common.collect.ImmutableMap; | ||
49 | -import com.google.common.collect.ImmutableSet; | ||
50 | -import static org.junit.Assert.assertEquals; | ||
51 | -import static org.junit.Assert.assertFalse; | ||
52 | -import static org.junit.Assert.assertNotNull; | ||
53 | -import static org.onosproject.net.DeviceId.deviceId; | ||
54 | -import static org.onosproject.net.Link.Type.DIRECT; | ||
55 | -import static org.onosproject.net.PortNumber.portNumber; | ||
56 | - | ||
57 | /** | 18 | /** |
58 | * Test of the simple LinkResourceStore implementation. | 19 | * Test of the simple LinkResourceStore implementation. |
59 | */ | 20 | */ |
60 | public class HazelcastLinkResourceStoreTest { | 21 | public class HazelcastLinkResourceStoreTest { |
61 | - | 22 | +/* |
62 | private LinkResourceStore store; | 23 | private LinkResourceStore store; |
63 | private HazelcastLinkResourceStore storeImpl; | 24 | private HazelcastLinkResourceStore storeImpl; |
64 | private Link link1; | 25 | private Link link1; |
... | @@ -74,7 +35,7 @@ public class HazelcastLinkResourceStoreTest { | ... | @@ -74,7 +35,7 @@ public class HazelcastLinkResourceStoreTest { |
74 | * @param dev2 destination device | 35 | * @param dev2 destination device |
75 | * @param port2 destination port | 36 | * @param port2 destination port |
76 | * @return created {@link Link} object | 37 | * @return created {@link Link} object |
77 | - */ | 38 | + * / |
78 | private Link newLink(String dev1, int port1, String dev2, int port2) { | 39 | private Link newLink(String dev1, int port1, String dev2, int port2) { |
79 | Annotations annotations = DefaultAnnotations.builder() | 40 | Annotations annotations = DefaultAnnotations.builder() |
80 | .set(AnnotationKeys.OPTICAL_WAVES, "80") | 41 | .set(AnnotationKeys.OPTICAL_WAVES, "80") |
... | @@ -112,9 +73,6 @@ public class HazelcastLinkResourceStoreTest { | ... | @@ -112,9 +73,6 @@ public class HazelcastLinkResourceStoreTest { |
112 | storeMgr.deactivate(); | 73 | storeMgr.deactivate(); |
113 | } | 74 | } |
114 | 75 | ||
115 | - /** | ||
116 | - * Tests constructor and activate method. | ||
117 | - */ | ||
118 | @Test | 76 | @Test |
119 | public void testConstructorAndActivate() { | 77 | public void testConstructorAndActivate() { |
120 | final Iterable<LinkResourceAllocations> allAllocations = store.getAllocations(); | 78 | final Iterable<LinkResourceAllocations> allAllocations = store.getAllocations(); |
... | @@ -130,13 +88,6 @@ public class HazelcastLinkResourceStoreTest { | ... | @@ -130,13 +88,6 @@ public class HazelcastLinkResourceStoreTest { |
130 | assertNotNull(res); | 88 | assertNotNull(res); |
131 | } | 89 | } |
132 | 90 | ||
133 | - /** | ||
134 | - * Picks up and returns one of bandwidth allocations from a given set. | ||
135 | - * | ||
136 | - * @param resources the set of {@link ResourceAllocation}s | ||
137 | - * @return {@link BandwidthResourceAllocation} object if found, null | ||
138 | - * otherwise | ||
139 | - */ | ||
140 | private BandwidthResourceAllocation getBandwidthObj(Set<ResourceAllocation> resources) { | 91 | private BandwidthResourceAllocation getBandwidthObj(Set<ResourceAllocation> resources) { |
141 | for (ResourceAllocation res : resources) { | 92 | for (ResourceAllocation res : resources) { |
142 | if (res.type() == ResourceType.BANDWIDTH) { | 93 | if (res.type() == ResourceType.BANDWIDTH) { |
... | @@ -146,12 +97,6 @@ public class HazelcastLinkResourceStoreTest { | ... | @@ -146,12 +97,6 @@ public class HazelcastLinkResourceStoreTest { |
146 | return null; | 97 | return null; |
147 | } | 98 | } |
148 | 99 | ||
149 | - /** | ||
150 | - * Returns all lambda allocations from a given set. | ||
151 | - * | ||
152 | - * @param resources the set of {@link ResourceAllocation}s | ||
153 | - * @return a set of {@link LambdaResourceAllocation} objects | ||
154 | - */ | ||
155 | private Set<LambdaResourceAllocation> getLambdaObjs(Set<ResourceAllocation> resources) { | 100 | private Set<LambdaResourceAllocation> getLambdaObjs(Set<ResourceAllocation> resources) { |
156 | Set<LambdaResourceAllocation> lambdaResources = new HashSet<>(); | 101 | Set<LambdaResourceAllocation> lambdaResources = new HashSet<>(); |
157 | for (ResourceAllocation res : resources) { | 102 | for (ResourceAllocation res : resources) { |
... | @@ -162,9 +107,6 @@ public class HazelcastLinkResourceStoreTest { | ... | @@ -162,9 +107,6 @@ public class HazelcastLinkResourceStoreTest { |
162 | return lambdaResources; | 107 | return lambdaResources; |
163 | } | 108 | } |
164 | 109 | ||
165 | - /** | ||
166 | - * Tests initial free bandwidth for a link. | ||
167 | - */ | ||
168 | @Test | 110 | @Test |
169 | public void testInitialBandwidth() { | 111 | public void testInitialBandwidth() { |
170 | final Set<ResourceAllocation> freeRes = store.getFreeResources(link1); | 112 | final Set<ResourceAllocation> freeRes = store.getFreeResources(link1); |
... | @@ -176,9 +118,6 @@ public class HazelcastLinkResourceStoreTest { | ... | @@ -176,9 +118,6 @@ public class HazelcastLinkResourceStoreTest { |
176 | assertEquals(new BandwidthResource(Bandwidth.mbps(1000.0)), alloc.bandwidth()); | 118 | assertEquals(new BandwidthResource(Bandwidth.mbps(1000.0)), alloc.bandwidth()); |
177 | } | 119 | } |
178 | 120 | ||
179 | - /** | ||
180 | - * Tests initial free lambda for a link. | ||
181 | - */ | ||
182 | @Test | 121 | @Test |
183 | public void testInitialLambdas() { | 122 | public void testInitialLambdas() { |
184 | final Set<ResourceAllocation> freeRes = store.getFreeResources(link3); | 123 | final Set<ResourceAllocation> freeRes = store.getFreeResources(link3); |
... | @@ -198,9 +137,6 @@ public class HazelcastLinkResourceStoreTest { | ... | @@ -198,9 +137,6 @@ public class HazelcastLinkResourceStoreTest { |
198 | 137 | ||
199 | } | 138 | } |
200 | 139 | ||
201 | - /** | ||
202 | - * Tests a successful bandwidth allocation. | ||
203 | - */ | ||
204 | @Test | 140 | @Test |
205 | public void testSuccessfulBandwidthAllocation() { | 141 | public void testSuccessfulBandwidthAllocation() { |
206 | final Link link = newLink("of:1", 1, "of:2", 2); | 142 | final Link link = newLink("of:1", 1, "of:2", 2); |
... | @@ -219,9 +155,6 @@ public class HazelcastLinkResourceStoreTest { | ... | @@ -219,9 +155,6 @@ public class HazelcastLinkResourceStoreTest { |
219 | store.allocateResources(allocations); | 155 | store.allocateResources(allocations); |
220 | } | 156 | } |
221 | 157 | ||
222 | - /** | ||
223 | - * Tests a unsuccessful bandwidth allocation. | ||
224 | - */ | ||
225 | @Test | 158 | @Test |
226 | public void testUnsuccessfulBandwidthAllocation() { | 159 | public void testUnsuccessfulBandwidthAllocation() { |
227 | final Link link = newLink("of:1", 1, "of:2", 2); | 160 | final Link link = newLink("of:1", 1, "of:2", 2); |
... | @@ -247,9 +180,6 @@ public class HazelcastLinkResourceStoreTest { | ... | @@ -247,9 +180,6 @@ public class HazelcastLinkResourceStoreTest { |
247 | assertEquals(true, gotException); | 180 | assertEquals(true, gotException); |
248 | } | 181 | } |
249 | 182 | ||
250 | - /** | ||
251 | - * Tests a successful bandwidth allocation. | ||
252 | - */ | ||
253 | @Test | 183 | @Test |
254 | public void testSuccessfulLambdaAllocation() { | 184 | public void testSuccessfulLambdaAllocation() { |
255 | final Link link = newLink("of:1", 1, "of:2", 2); | 185 | final Link link = newLink("of:1", 1, "of:2", 2); |
... | @@ -268,9 +198,6 @@ public class HazelcastLinkResourceStoreTest { | ... | @@ -268,9 +198,6 @@ public class HazelcastLinkResourceStoreTest { |
268 | store.allocateResources(allocations); | 198 | store.allocateResources(allocations); |
269 | } | 199 | } |
270 | 200 | ||
271 | - /** | ||
272 | - * Tests a unsuccessful bandwidth allocation. | ||
273 | - */ | ||
274 | @Test | 201 | @Test |
275 | public void testUnsuccessfulLambdaAllocation() { | 202 | public void testUnsuccessfulLambdaAllocation() { |
276 | final Link link = newLink("of:1", 1, "of:2", 2); | 203 | final Link link = newLink("of:1", 1, "of:2", 2); |
... | @@ -296,4 +223,5 @@ public class HazelcastLinkResourceStoreTest { | ... | @@ -296,4 +223,5 @@ public class HazelcastLinkResourceStoreTest { |
296 | } | 223 | } |
297 | assertEquals(true, gotException); | 224 | assertEquals(true, gotException); |
298 | } | 225 | } |
226 | + */ | ||
299 | } | 227 | } | ... | ... |
... | @@ -37,7 +37,6 @@ | ... | @@ -37,7 +37,6 @@ |
37 | 37 | ||
38 | <bundle>mvn:joda-time/joda-time/2.5</bundle> | 38 | <bundle>mvn:joda-time/joda-time/2.5</bundle> |
39 | 39 | ||
40 | - <bundle>mvn:com.hazelcast/hazelcast/3.4</bundle> | ||
41 | <bundle>mvn:io.dropwizard.metrics/metrics-core/3.1.0</bundle> | 40 | <bundle>mvn:io.dropwizard.metrics/metrics-core/3.1.0</bundle> |
42 | <bundle>mvn:io.dropwizard.metrics/metrics-json/3.1.0</bundle> | 41 | <bundle>mvn:io.dropwizard.metrics/metrics-json/3.1.0</bundle> |
43 | <bundle>mvn:com.eclipsesource.minimal-json/minimal-json/0.9.1</bundle> | 42 | <bundle>mvn:com.eclipsesource.minimal-json/minimal-json/0.9.1</bundle> | ... | ... |
... | @@ -58,21 +58,15 @@ | ... | @@ -58,21 +58,15 @@ |
58 | <scope>test</scope> | 58 | <scope>test</scope> |
59 | </dependency> | 59 | </dependency> |
60 | <dependency> | 60 | <dependency> |
61 | - <groupId>org.easymock</groupId> | 61 | + <groupId>org.easymock</groupId> |
62 | - <artifactId>easymock</artifactId> | 62 | + <artifactId>easymock</artifactId> |
63 | - <scope>test</scope> | 63 | + <scope>test</scope> |
64 | - </dependency> | ||
65 | - <dependency> | ||
66 | - <groupId>org.onosproject</groupId> | ||
67 | - <artifactId>onos-api</artifactId> | ||
68 | - <classifier>tests</classifier> | ||
69 | - <scope>test</scope> | ||
70 | </dependency> | 64 | </dependency> |
71 | <dependency> | 65 | <dependency> |
72 | - <groupId>com.hazelcast</groupId> | 66 | + <groupId>org.onosproject</groupId> |
73 | - <artifactId>hazelcast</artifactId> | 67 | + <artifactId>onos-api</artifactId> |
74 | - <classifier>tests</classifier> | 68 | + <classifier>tests</classifier> |
75 | - <scope>test</scope> | 69 | + <scope>test</scope> |
76 | </dependency> | 70 | </dependency> |
77 | </dependencies> | 71 | </dependencies> |
78 | 72 | ... | ... |
... | @@ -287,18 +287,6 @@ | ... | @@ -287,18 +287,6 @@ |
287 | </dependency> | 287 | </dependency> |
288 | 288 | ||
289 | <dependency> | 289 | <dependency> |
290 | - <groupId>com.hazelcast</groupId> | ||
291 | - <artifactId>hazelcast</artifactId> | ||
292 | - <version>3.4</version> | ||
293 | - </dependency> | ||
294 | - <dependency> | ||
295 | - <groupId>com.hazelcast</groupId> | ||
296 | - <artifactId>hazelcast</artifactId> | ||
297 | - <version>3.4</version> | ||
298 | - <classifier>tests</classifier> | ||
299 | - <scope>test</scope> | ||
300 | - </dependency> | ||
301 | - <dependency> | ||
302 | <groupId>com.eclipsesource.minimal-json</groupId> | 290 | <groupId>com.eclipsesource.minimal-json</groupId> |
303 | <artifactId>minimal-json</artifactId> | 291 | <artifactId>minimal-json</artifactId> |
304 | <version>0.9.1</version> | 292 | <version>0.9.1</version> | ... | ... |
... | @@ -17,7 +17,7 @@ export STAGE=$(dirname $KARAF_ROOT) | ... | @@ -17,7 +17,7 @@ export STAGE=$(dirname $KARAF_ROOT) |
17 | # Validates the specified IP regular expression against existing adapters. | 17 | # Validates the specified IP regular expression against existing adapters. |
18 | # Excludes local-loopback. | 18 | # Excludes local-loopback. |
19 | function validateIp { | 19 | function validateIp { |
20 | - ifconfig | awk '{ print $2}' | grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}" | grep -v "127\.0\.0\.1" | grep $1 | 20 | + ifconfig | awk '{ print $2}' | grep -E -o "([0-9]{1,3}[\.]){3}[0-9]{1,3}" | grep $1 |
21 | } | 21 | } |
22 | 22 | ||
23 | # Clean the previous Karaf directory if requested and if it exists. | 23 | # Clean the previous Karaf directory if requested and if it exists. |
... | @@ -26,6 +26,7 @@ if [ "$1" = "clean" ]; then | ... | @@ -26,6 +26,7 @@ if [ "$1" = "clean" ]; then |
26 | [ -d $KARAF_ROOT ] && rm -fr $KARAF_ROOT $STAGE/apps $STAGE/config | 26 | [ -d $KARAF_ROOT ] && rm -fr $KARAF_ROOT $STAGE/apps $STAGE/config |
27 | fi | 27 | fi |
28 | 28 | ||
29 | +ONOS_IP=${ONOS_IP:-127.0.0.1} | ||
29 | IP="${1:-$ONOS_IP}" | 30 | IP="${1:-$ONOS_IP}" |
30 | 31 | ||
31 | # If IP was not given, nor configured attempt to use ONOS_NIC env. variable | 32 | # If IP was not given, nor configured attempt to use ONOS_NIC env. variable |
... | @@ -104,11 +105,6 @@ cat > $STAGE/config/tablets.json <<EOF | ... | @@ -104,11 +105,6 @@ cat > $STAGE/config/tablets.json <<EOF |
104 | "partitions": { "p1": [ { "ip": "$IP", "id": "$IP", "tcpPort": 9876 }]}} | 105 | "partitions": { "p1": [ { "ip": "$IP", "id": "$IP", "tcpPort": 9876 }]}} |
105 | EOF | 106 | EOF |
106 | 107 | ||
107 | -echo "Setting up hazelcast.xml for subnet $SUBNET.*..." | ||
108 | -cp $ONOS_ROOT/tools/package/etc/hazelcast.xml $KARAF_ROOT/etc/hazelcast.xml | ||
109 | -perl -pi.old -e "s/192.168.56/$SUBNET/" $KARAF_ROOT/etc/hazelcast.xml | ||
110 | -perl -pi.old -e "s/ <name>onos</ <name>$IP</" $KARAF_ROOT/etc/hazelcast.xml | ||
111 | - | ||
112 | echo "Staging builtin apps..." | 108 | echo "Staging builtin apps..." |
113 | rm -fr $STAGE/apps | 109 | rm -fr $STAGE/apps |
114 | onos-stage-apps $STAGE/apps $KARAF_ROOT/system | 110 | onos-stage-apps $STAGE/apps $KARAF_ROOT/system | ... | ... |
1 | #!/bin/bash | 1 | #!/bin/bash |
2 | -# ----------------------------------------------------------------------------- | 2 | +echo "This command has been deprecated as this step is no longer required." |
3 | -# Configures ONOS to multicast on the specified IP prefix/subnet. | ||
4 | -# ----------------------------------------------------------------------------- | ||
5 | - | ||
6 | -[ $# -lt 2 ] && echo "usage: $(basename $0) name ipPrefix" && exit 1 | ||
7 | - | ||
8 | -name=$1 | ||
9 | -ipPrefix=$2 | ||
10 | - | ||
11 | -hzXml=$(dirname $0)/../apache-karaf-*/etc/hazelcast.xml | ||
12 | - | ||
13 | -perl -pi.bak -e "s/^ <interface>[^<]*/ <interface>$ipPrefix/g" $hzXml | ||
14 | -perl -pi -e "s/ <name>[^<]*/ <name>$name/g" $hzXml | ||
... | \ No newline at end of file | ... | \ No newline at end of file | ... | ... |
tools/package/etc/hazelcast.xml
deleted
100644 → 0
1 | -<?xml version="1.0" encoding="UTF-8"?> | ||
2 | - | ||
3 | -<!-- | ||
4 | - ~ Copyright (c) 2008-2013, Hazelcast, Inc. All Rights Reserved. | ||
5 | - ~ Copyright 2014 Open Networking Laboratory | ||
6 | - ~ | ||
7 | - ~ Licensed under the Apache License, Version 2.0 (the "License"); | ||
8 | - ~ you may not use this file except in compliance with the License. | ||
9 | - ~ You may obtain a copy of the License at | ||
10 | - ~ | ||
11 | - ~ http://www.apache.org/licenses/LICENSE-2.0 | ||
12 | - ~ | ||
13 | - ~ Unless required by applicable law or agreed to in writing, software | ||
14 | - ~ distributed under the License is distributed on an "AS IS" BASIS, | ||
15 | - ~ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||
16 | - ~ See the License for the specific language governing permissions and | ||
17 | - ~ limitations under the License. | ||
18 | - --> | ||
19 | - | ||
20 | -<!-- | ||
21 | - The default Hazelcast configuration. This is used when: | ||
22 | - | ||
23 | - - no hazelcast.xml if present | ||
24 | - | ||
25 | ---> | ||
26 | -<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.3.xsd" | ||
27 | - xmlns="http://www.hazelcast.com/schema/config" | ||
28 | - xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"> | ||
29 | - <group> | ||
30 | - <name>onos</name> | ||
31 | - <password>rocks</password> | ||
32 | - </group> | ||
33 | - <management-center enabled="false">http://localhost:8080/mancenter</management-center> | ||
34 | - <properties> | ||
35 | - <property name="hazelcast.max.no.heartbeat.seconds">30</property> | ||
36 | - <property name="hazelcast.merge.first.run.delay.seconds">30</property> | ||
37 | - <property name="hazelcast.merge.next.run.delay.seconds">30</property> | ||
38 | - </properties> | ||
39 | - <network> | ||
40 | - <port auto-increment="true" port-count="100">5701</port> | ||
41 | - <outbound-ports> | ||
42 | - <!-- | ||
43 | - Allowed port range when connecting to other nodes. | ||
44 | - 0 or * means use system provided port. | ||
45 | - --> | ||
46 | - <ports>0</ports> | ||
47 | - </outbound-ports> | ||
48 | - <join> | ||
49 | - <multicast enabled="true"> | ||
50 | - <multicast-group>224.2.2.3</multicast-group> | ||
51 | - <multicast-port>54327</multicast-port> | ||
52 | - </multicast> | ||
53 | - <tcp-ip enabled="false"> | ||
54 | - <interface>127.0.0.1</interface> | ||
55 | - </tcp-ip> | ||
56 | - </join> | ||
57 | - <interfaces enabled="true"> | ||
58 | - <interface>192.168.56.*</interface> | ||
59 | - </interfaces> | ||
60 | - <ssl enabled="false"/> | ||
61 | - <socket-interceptor enabled="false"/> | ||
62 | - <symmetric-encryption enabled="false"> | ||
63 | - <!-- | ||
64 | - encryption algorithm such as | ||
65 | - DES/ECB/PKCS5Padding, | ||
66 | - PBEWithMD5AndDES, | ||
67 | - AES/CBC/PKCS5Padding, | ||
68 | - Blowfish, | ||
69 | - DESede | ||
70 | - --> | ||
71 | - <algorithm>PBEWithMD5AndDES</algorithm> | ||
72 | - <!-- salt value to use when generating the secret key --> | ||
73 | - <salt>thesalt</salt> | ||
74 | - <!-- pass phrase to use when generating the secret key --> | ||
75 | - <password>thepass</password> | ||
76 | - <!-- iteration count to use when generating the secret key --> | ||
77 | - <iteration-count>19</iteration-count> | ||
78 | - </symmetric-encryption> | ||
79 | - </network> | ||
80 | - <partition-group enabled="false"/> | ||
81 | - <executor-service name="default"> | ||
82 | - <pool-size>16</pool-size> | ||
83 | - <!--Queue capacity. 0 means Integer.MAX_VALUE.--> | ||
84 | - <queue-capacity>0</queue-capacity> | ||
85 | - </executor-service> | ||
86 | - <queue name="default"> | ||
87 | - <!-- | ||
88 | - Maximum size of the queue. When a JVM's local queue size reaches the maximum, | ||
89 | - all put/offer operations will get blocked until the queue size | ||
90 | - of the JVM goes down below the maximum. | ||
91 | - Any integer between 0 and Integer.MAX_VALUE. 0 means | ||
92 | - Integer.MAX_VALUE. Default is 0. | ||
93 | - --> | ||
94 | - <max-size>0</max-size> | ||
95 | - <!-- | ||
96 | - Number of backups. If 1 is set as the backup-count for example, | ||
97 | - then all entries of the map will be copied to another JVM for | ||
98 | - fail-safety. 0 means no backup. | ||
99 | - --> | ||
100 | - <backup-count>1</backup-count> | ||
101 | - | ||
102 | - <!-- | ||
103 | - Number of async backups. 0 means no backup. | ||
104 | - --> | ||
105 | - <async-backup-count>0</async-backup-count> | ||
106 | - | ||
107 | - <empty-queue-ttl>-1</empty-queue-ttl> | ||
108 | - </queue> | ||
109 | - <map name="default"> | ||
110 | - <!-- | ||
111 | - Data type that will be used for storing recordMap. | ||
112 | - Possible values: | ||
113 | - BINARY (default): keys and values will be stored as binary data | ||
114 | - OBJECT : values will be stored in their object forms | ||
115 | - OFFHEAP : values will be stored in non-heap region of JVM | ||
116 | - --> | ||
117 | - <in-memory-format>BINARY</in-memory-format> | ||
118 | - | ||
119 | - <!-- | ||
120 | - Number of backups. If 1 is set as the backup-count for example, | ||
121 | - then all entries of the map will be copied to another JVM for | ||
122 | - fail-safety. 0 means no backup. | ||
123 | - --> | ||
124 | - <backup-count>1</backup-count> | ||
125 | - <!-- | ||
126 | - Number of async backups. 0 means no backup. | ||
127 | - --> | ||
128 | - <async-backup-count>0</async-backup-count> | ||
129 | - <!-- | ||
130 | - Maximum number of seconds for each entry to stay in the map. Entries that are | ||
131 | - older than <time-to-live-seconds> and not updated for <time-to-live-seconds> | ||
132 | - will get automatically evicted from the map. | ||
133 | - Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0. | ||
134 | - --> | ||
135 | - <time-to-live-seconds>0</time-to-live-seconds> | ||
136 | - <!-- | ||
137 | - Maximum number of seconds for each entry to stay idle in the map. Entries that are | ||
138 | - idle(not touched) for more than <max-idle-seconds> will get | ||
139 | - automatically evicted from the map. Entry is touched if get, put or containsKey is called. | ||
140 | - Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0. | ||
141 | - --> | ||
142 | - <max-idle-seconds>0</max-idle-seconds> | ||
143 | - <!-- | ||
144 | - Valid values are: | ||
145 | - NONE (no eviction), | ||
146 | - LRU (Least Recently Used), | ||
147 | - LFU (Least Frequently Used). | ||
148 | - NONE is the default. | ||
149 | - --> | ||
150 | - <eviction-policy>NONE</eviction-policy> | ||
151 | - <!-- | ||
152 | - Maximum size of the map. When max size is reached, | ||
153 | - map is evicted based on the policy defined. | ||
154 | - Any integer between 0 and Integer.MAX_VALUE. 0 means | ||
155 | - Integer.MAX_VALUE. Default is 0. | ||
156 | - --> | ||
157 | - <max-size policy="PER_NODE">0</max-size> | ||
158 | - <!-- | ||
159 | - When max. size is reached, specified percentage of | ||
160 | - the map will be evicted. Any integer between 0 and 100. | ||
161 | - If 25 is set for example, 25% of the entries will | ||
162 | - get evicted. | ||
163 | - --> | ||
164 | - <eviction-percentage>25</eviction-percentage> | ||
165 | - <!-- | ||
166 | - Minimum time in milliseconds which should pass before checking | ||
167 | - if a partition of this map is evictable or not. | ||
168 | - Default value is 100 millis. | ||
169 | - --> | ||
170 | - <min-eviction-check-millis>100</min-eviction-check-millis> | ||
171 | - <!-- | ||
172 | - While recovering from split-brain (network partitioning), | ||
173 | - map entries in the small cluster will merge into the bigger cluster | ||
174 | - based on the policy set here. When an entry merge into the | ||
175 | - cluster, there might an existing entry with the same key already. | ||
176 | - Values of these entries might be different for that same key. | ||
177 | - Which value should be set for the key? Conflict is resolved by | ||
178 | - the policy set here. Default policy is PutIfAbsentMapMergePolicy | ||
179 | - | ||
180 | - There are built-in merge policies such as | ||
181 | - com.hazelcast.map.merge.PassThroughMergePolicy; entry will be added if there is no existing entry for the key. | ||
182 | - com.hazelcast.map.merge.PutIfAbsentMapMergePolicy ; entry will be added if the merging entry doesn't exist in the cluster. | ||
183 | - com.hazelcast.map.merge.HigherHitsMapMergePolicy ; entry with the higher hits wins. | ||
184 | - com.hazelcast.map.merge.LatestUpdateMapMergePolicy ; entry with the latest update wins. | ||
185 | - --> | ||
186 | - <merge-policy>com.hazelcast.map.merge.PutIfAbsentMapMergePolicy</merge-policy> | ||
187 | - </map> | ||
188 | - | ||
189 | - <multimap name="default"> | ||
190 | - <backup-count>1</backup-count> | ||
191 | - <value-collection-type>SET</value-collection-type> | ||
192 | - </multimap> | ||
193 | - | ||
194 | - <multimap name="default"> | ||
195 | - <backup-count>1</backup-count> | ||
196 | - <value-collection-type>SET</value-collection-type> | ||
197 | - </multimap> | ||
198 | - | ||
199 | - <list name="default"> | ||
200 | - <backup-count>1</backup-count> | ||
201 | - </list> | ||
202 | - | ||
203 | - <set name="default"> | ||
204 | - <backup-count>1</backup-count> | ||
205 | - </set> | ||
206 | - | ||
207 | - <jobtracker name="default"> | ||
208 | - <max-thread-size>0</max-thread-size> | ||
209 | - <!-- Queue size 0 means number of partitions * 2 --> | ||
210 | - <queue-size>0</queue-size> | ||
211 | - <retry-count>0</retry-count> | ||
212 | - <chunk-size>1000</chunk-size> | ||
213 | - <communicate-stats>true</communicate-stats> | ||
214 | - <topology-changed-strategy>CANCEL_RUNNING_OPERATION</topology-changed-strategy> | ||
215 | - </jobtracker> | ||
216 | - | ||
217 | - <semaphore name="default"> | ||
218 | - <initial-permits>0</initial-permits> | ||
219 | - <backup-count>1</backup-count> | ||
220 | - <async-backup-count>0</async-backup-count> | ||
221 | - </semaphore> | ||
222 | - | ||
223 | - <serialization> | ||
224 | - <portable-version>0</portable-version> | ||
225 | - </serialization> | ||
226 | - | ||
227 | - <services enable-defaults="true"/> | ||
228 | - | ||
229 | -</hazelcast> |
... | @@ -26,11 +26,6 @@ echo "]}" >> $CDEF_FILE | ... | @@ -26,11 +26,6 @@ echo "]}" >> $CDEF_FILE |
26 | scp -q $CDEF_FILE $remote:$ONOS_INSTALL_DIR/config/cluster.json | 26 | scp -q $CDEF_FILE $remote:$ONOS_INSTALL_DIR/config/cluster.json |
27 | 27 | ||
28 | ssh $remote " | 28 | ssh $remote " |
29 | - sudo perl -pi.bak -e \"s/ <interface>.*</ <interface>${ONOS_NIC:-192.168.56.*}</g\" \ | ||
30 | - $ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml | ||
31 | - sudo perl -pi -e \"s/ <name>onos</ <name>${ONOS_CELL:-onos}</g\" \ | ||
32 | - $ONOS_INSTALL_DIR/$KARAF_DIST/etc/hazelcast.xml | ||
33 | - | ||
34 | echo \"onos.ip = \$(sudo ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" \ | 29 | echo \"onos.ip = \$(sudo ifconfig | grep $ONOS_NIC | cut -d: -f2 | cut -d\\ -f1)\" \ |
35 | >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties | 30 | >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/system.properties |
36 | 31 | ||
... | @@ -38,10 +33,6 @@ ssh $remote " | ... | @@ -38,10 +33,6 @@ ssh $remote " |
38 | echo "log4j.logger.net.kuujo.copycat= INFO" \ | 33 | echo "log4j.logger.net.kuujo.copycat= INFO" \ |
39 | >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/org.ops4j.pax.logging.cfg | 34 | >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/org.ops4j.pax.logging.cfg |
40 | 35 | ||
41 | - # Suppress Hazelcast multicast joiner warning | ||
42 | - echo "log4j.logger.com.hazelcast.cluster.impl.MulticastService= ERROR" \ | ||
43 | - >> $ONOS_INSTALL_DIR/$KARAF_DIST/etc/org.ops4j.pax.logging.cfg | ||
44 | - | ||
45 | # Patch the Apache Karaf distribution file to load ONOS boot features | 36 | # Patch the Apache Karaf distribution file to load ONOS boot features |
46 | perl -pi.old -e \"s|^(featuresBoot=.*,management)(,webconsole,.*)|\1,$ONOS_BOOT_FEATURES|\" \ | 37 | perl -pi.old -e \"s|^(featuresBoot=.*,management)(,webconsole,.*)|\1,$ONOS_BOOT_FEATURES|\" \ |
47 | $ONOS_INSTALL_DIR/$KARAF_DIST/etc/org.apache.karaf.features.cfg | 38 | $ONOS_INSTALL_DIR/$KARAF_DIST/etc/org.apache.karaf.features.cfg | ... | ... |
-
Please register or login to post a comment