Ayaka Koshibe

Merge branch 'master' of ssh://gerrit.onlab.us:29418/onos-next

Showing 32 changed files with 1418 additions and 310 deletions
package org.onlab.onos.cluster;
import org.onlab.onos.event.AbstractEvent;
/**
* Describes a cluster-related event.
*/
public class ClusterEvent extends AbstractEvent<ClusterEvent.Type, ControllerInstance> {
/**
* Type of cluster events.
*/
public enum Type {
/**
* Signifies that a new cluster instance has been administratively added.
*/
INSTANCE_ADDED,
/**
* Signifies that a cluster instance has been administratively removed.
*/
INSTANCE_REMOVED,
/**
* Signifies that a cluster instance became active.
*/
INSTANCE_ACTIVE,
/**
* Signifies that a cluster instance became inactive.
*/
INSTANCE_INACTIVE
}
// TODO: do we need to fix the verb/adjective mix? discuss
/**
* Creates an event of a given type for the specified instance, using the
* current time.
*
* @param type cluster event type
* @param instance cluster instance subject
*/
public ClusterEvent(Type type, ControllerInstance instance) {
super(type, instance);
}
/**
* Creates an event of a given type for the specified instance and time.
*
* @param type cluster event type
* @param instance cluster instance subject
* @param time occurrence time
*/
public ClusterEvent(Type type, ControllerInstance instance, long time) {
super(type, instance, time);
}
}
package org.onlab.onos.cluster;
import java.util.Set;
/**
* Service for obtaining information about the individual instances within
* the controller cluster.
*/
public interface ClusterService {
/**
* Returns the set of current cluster members.
*
* @return set of cluster members
*/
Set<ControllerInstance> getInstances();
/**
* Returns the availability state of the specified controller instance.
*
* @param instance controller instance to query
* @return availability state
*/
ControllerInstance.State getState(ControllerInstance instance);
// TODO: determine if this would be better attached to ControllerInstance directly
// addListener, removeListener
}
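A minimal consumer sketch, for illustration only: ClusterMonitor is hypothetical and not part of this change, and it uses only the accessors defined above (the listener API is still a TODO).

import org.onlab.onos.cluster.ClusterService;
import org.onlab.onos.cluster.ControllerInstance;

// Hypothetical consumer of the ClusterService API above.
public class ClusterMonitor {

    private final ClusterService clusterService;

    public ClusterMonitor(ClusterService clusterService) {
        this.clusterService = clusterService;
    }

    // Prints the availability of every known cluster member.
    public void printMembership() {
        for (ControllerInstance instance : clusterService.getInstances()) {
            ControllerInstance.State state = clusterService.getState(instance);
            System.out.println(instance.id() + " @ " + instance.ip() + " is " + state);
        }
    }
}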
package org.onlab.onos.cluster;
import org.onlab.packet.IpAddress;
/**
* Represents a controller instance as a member in a cluster.
*/
public interface ControllerInstance {
/** Represents the operational state of the instance. */
public enum State {
/**
* Signifies that the instance is active and operating normally.
*/
ACTIVE,
/**
* Signifies that the instance is inactive, meaning that it is either down,
* or up but not fully operational.
*/
INACTIVE
}
/**
* Returns the instance identifier.
*
* @return instance identifier
*/
InstanceId id();
/**
* Returns the IP address of the controller instance.
*
* @return IP address
*/
IpAddress ip();
}
package org.onlab.onos.cluster;
/**
* Controller cluster instance identity.
*/
public interface InstanceId {
}
/**
* Set of abstractions for dealing with controller cluster-related topics.
*/
package org.onlab.onos.cluster;
\ No newline at end of file
......@@ -19,7 +19,7 @@ public class FlowRuleEvent extends AbstractEvent<FlowRuleEvent.Type, FlowRule> {
/**
* Signifies that a flow rule has been removed.
*/
RULE_REMOVED,
RULE_REMOVED
}
/**
......
......@@ -23,6 +23,8 @@ public interface FlowRuleService {
*/
Iterable<FlowEntry> getFlowEntries(DeviceId deviceId);
// TODO: add createFlowRule factory method and execute operations method
/**
* Applies the specified flow rules onto their respective devices. These
* flow rules will be retained by the system and re-applied anytime the
......@@ -46,9 +48,6 @@ public interface FlowRuleService {
void removeFlowRules(FlowRule... flowRules);
// void addInitialFlowContributor(InitialFlowContributor contributor);
// void removeInitialFlowContributor(InitialFlowContributor contributor);
/**
* Adds the specified flow rule listener.
*
......
......@@ -7,6 +7,7 @@
description="ONOS 3rd party dependencies">
<bundle>mvn:commons-lang/commons-lang/2.6</bundle>
<bundle>mvn:com.google.guava/guava/18.0</bundle>
<bundle>mvn:io.netty/netty/3.9.2.Final</bundle>
</feature>
<feature name="onos-thirdparty-web" version="1.0.0"
......@@ -18,6 +19,7 @@
<bundle>mvn:com.sun.jersey/jersey-core/1.18.1</bundle>
<bundle>mvn:com.sun.jersey/jersey-server/1.18.1</bundle>
<bundle>mvn:com.sun.jersey/jersey-servlet/1.18.1</bundle>
</feature>
<feature name="onos-api" version="1.0.0"
......@@ -61,7 +63,6 @@
description="ONOS OpenFlow API, Controller &amp; Providers">
<feature>onos-api</feature>
<bundle>mvn:io.netty/netty/3.9.2.Final</bundle>
<bundle>mvn:org.onlab.onos/onos-of-api/1.0.0-SNAPSHOT</bundle>
<bundle>mvn:org.onlab.onos/onos-of-ctl/1.0.0-SNAPSHOT</bundle>
......@@ -75,8 +76,9 @@
<feature name="onos-app-tvue" version="1.0.0"
description="ONOS sample topology viewer application">
<feature>onos-api</feature>
<feature>onos-thirdparty-web</feature>
<feature>onos-api</feature>
<feature>onos-core</feature>
<bundle>mvn:org.onlab.onos/onos-app-tvue/1.0.0-SNAPSHOT</bundle>
</feature>
......
......@@ -78,6 +78,20 @@ public interface OpenFlowController {
public void removePacketListener(PacketListener listener);
/**
* Register a listener for OpenFlow message events.
*
* @param listener the listener to notify
*/
public void addEventListener(OpenFlowEventListener listener);
/**
* Unregister a listener.
*
* @param listener the listener to unregister
*/
public void removeEventListener(OpenFlowEventListener listener);
/**
* Send a message to a particular switch.
* @param dpid the switch to send to.
* @param msg the message to send
......
package org.onlab.onos.openflow.controller;
import org.projectfloodlight.openflow.protocol.OFMessage;
/**
* Notifies providers about OpenFlow message events.
*/
public interface OpenFlowEventListener {
/**
* Handles the message event.
*
* @param dpid the switch that sent the message
* @param msg the message
*/
public void handleMessage(Dpid dpid, OFMessage msg);
}
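A minimal listener sketch, for illustration only: BarrierLogger is hypothetical; handleMessage and addEventListener are the only calls taken from the interfaces introduced in this change.

import org.onlab.onos.openflow.controller.Dpid;
import org.onlab.onos.openflow.controller.OpenFlowController;
import org.onlab.onos.openflow.controller.OpenFlowEventListener;
import org.projectfloodlight.openflow.protocol.OFMessage;

// Hypothetical listener; OpenFlowRuleProvider below registers its
// InternalFlowProvider in the same way.
public class BarrierLogger implements OpenFlowEventListener {

    @Override
    public void handleMessage(Dpid dpid, OFMessage msg) {
        switch (msg.getType()) {
        case BARRIER_REPLY:
            System.out.println("Barrier completed on switch " + dpid);
            break;
        default:
            // FLOW_REMOVED, ERROR and STATS_REPLY are ignored in this sketch.
            break;
        }
    }

    public void register(OpenFlowController controller) {
        controller.addEventListener(this);
    }
}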
......@@ -63,4 +63,16 @@ public class OpenflowControllerAdapter implements OpenFlowController {
@Override
public void setRole(Dpid dpid, RoleState role) {
}
@Override
public void addEventListener(OpenFlowEventListener listener) {
// TODO Auto-generated method stub
}
@Override
public void removeEventListener(OpenFlowEventListener listener) {
// TODO Auto-generated method stub
}
}
......
......@@ -15,107 +15,27 @@
<description>ONOS OpenFlow controller subsystem API</description>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<powermock.version>1.5.5</powermock.version>
<restlet.version>2.1.4</restlet.version>
<cobertura-maven-plugin.version>2.6</cobertura-maven-plugin.version>
<!-- The following two findbugs versions need to be updated in sync to match
the findbugs version used in findbugs-plugin -->
<findbugs.version>3.0.0</findbugs.version>
<findbugs-plugin.version>3.0.0</findbugs-plugin.version>
<findbugs.effort>Max</findbugs.effort>
<findbugs.excludeFilterFile>${project.basedir}/conf/findbugs/exclude.xml
</findbugs.excludeFilterFile>
<checkstyle-plugin.version>2.12</checkstyle-plugin.version>
<!-- To publish javadoc to github,
uncomment com.github.github site-maven-plugin and
see https://github.com/OPENNETWORKINGLAB/ONOS/pull/425
<github.global.server>github</github.global.server>
-->
<metrics.version>3.0.2</metrics.version>
<maven.surefire.plugin.version>2.16</maven.surefire.plugin.version>
</properties>
<dependencies>
<dependency>
<groupId>org.onlab.onos</groupId>
<artifactId>onos-of-api</artifactId>
</dependency>
<!-- ONOS's direct dependencies -->
<dependency>
<groupId>org.apache.felix</groupId>
<artifactId>org.apache.felix.scr.annotations</artifactId>
<version>1.9.6</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-classic</artifactId>
<version>1.1.2</version>
</dependency>
<dependency>
<groupId>ch.qos.logback</groupId>
<artifactId>logback-core</artifactId>
<version>1.1.2</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<version>1.7.5</version>
</dependency>
<dependency>
<!-- findbugs suppression annotation and @GuardedBy, etc. -->
<groupId>com.google.code.findbugs</groupId>
<artifactId>annotations</artifactId>
<version>${findbugs.version}</version>
</dependency>
<dependency>
<groupId>org.projectfloodlight</groupId>
<artifactId>openflowj</artifactId>
<version>0.3.8-SNAPSHOT</version>
</dependency>
<!-- Floodlight's dependencies -->
<dependency>
<!-- dependency on an old version of netty? -->
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
<version>3.9.2.Final</version>
</dependency>
<!-- Dependency for libraries used for testing -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.11</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.easymock</groupId>
<artifactId>easymock</artifactId>
<version>3.2</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-module-junit4</artifactId>
<version>${powermock.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.powermock</groupId>
<artifactId>powermock-api-easymock</artifactId>
<version>${powermock.version}</version>
<scope>test</scope>
<groupId>org.apache.felix</groupId>
<artifactId>org.apache.felix.scr.annotations</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.felix</groupId>
<artifactId>maven-scr-plugin</artifactId>
</plugin>
</plugins>
</build>
......
package org.onlab.onos.openflow.controller.impl;
import static org.onlab.util.Tools.namedThreads;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
......@@ -13,6 +17,7 @@ import org.apache.felix.scr.annotations.Service;
import org.onlab.onos.openflow.controller.DefaultOpenFlowPacketContext;
import org.onlab.onos.openflow.controller.Dpid;
import org.onlab.onos.openflow.controller.OpenFlowController;
import org.onlab.onos.openflow.controller.OpenFlowEventListener;
import org.onlab.onos.openflow.controller.OpenFlowPacketContext;
import org.onlab.onos.openflow.controller.OpenFlowSwitch;
import org.onlab.onos.openflow.controller.OpenFlowSwitchListener;
......@@ -27,6 +32,7 @@ import org.slf4j.LoggerFactory;
import com.google.common.collect.ArrayListMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.Sets;
@Component(immediate = true)
@Service
......@@ -35,6 +41,10 @@ public class OpenFlowControllerImpl implements OpenFlowController {
private static final Logger log =
LoggerFactory.getLogger(OpenFlowControllerImpl.class);
private final ExecutorService executor = Executors.newFixedThreadPool(16,
namedThreads("of-event-%d"));
protected ConcurrentHashMap<Dpid, OpenFlowSwitch> connectedSwitches =
new ConcurrentHashMap<Dpid, OpenFlowSwitch>();
protected ConcurrentHashMap<Dpid, OpenFlowSwitch> activeMasterSwitches =
......@@ -43,11 +53,12 @@ public class OpenFlowControllerImpl implements OpenFlowController {
new ConcurrentHashMap<Dpid, OpenFlowSwitch>();
protected OpenFlowSwitchAgent agent = new OpenFlowSwitchAgent();
protected Set<OpenFlowSwitchListener> ofEventListener = new HashSet<>();
protected Set<OpenFlowSwitchListener> ofSwitchListener = new HashSet<>();
protected Multimap<Integer, PacketListener> ofPacketListener =
ArrayListMultimap.create();
protected Set<OpenFlowEventListener> ofEventListener = Sets.newHashSet();
private final Controller ctrl = new Controller();
......@@ -93,14 +104,14 @@ public class OpenFlowControllerImpl implements OpenFlowController {
@Override
public void addListener(OpenFlowSwitchListener listener) {
if (!ofEventListener.contains(listener)) {
this.ofEventListener.add(listener);
if (!ofSwitchListener.contains(listener)) {
this.ofSwitchListener.add(listener);
}
}
@Override
public void removeListener(OpenFlowSwitchListener listener) {
this.ofEventListener.remove(listener);
this.ofSwitchListener.remove(listener);
}
@Override
......@@ -114,6 +125,16 @@ public class OpenFlowControllerImpl implements OpenFlowController {
}
@Override
public void addEventListener(OpenFlowEventListener listener) {
ofEventListener.add(listener);
}
@Override
public void removeEventListener(OpenFlowEventListener listener) {
ofEventListener.remove(listener);
}
@Override
public void write(Dpid dpid, OFMessage msg) {
this.getSwitch(dpid).sendMsg(msg);
}
......@@ -122,7 +143,7 @@ public class OpenFlowControllerImpl implements OpenFlowController {
public void processPacket(Dpid dpid, OFMessage msg) {
switch (msg.getType()) {
case PORT_STATUS:
for (OpenFlowSwitchListener l : ofEventListener) {
for (OpenFlowSwitchListener l : ofSwitchListener) {
l.portChanged(dpid, (OFPortStatus) msg);
}
break;
......@@ -134,6 +155,12 @@ public class OpenFlowControllerImpl implements OpenFlowController {
p.handlePacket(pktCtx);
}
break;
case FLOW_REMOVED:
case ERROR:
case STATS_REPLY:
case BARRIER_REPLY:
executor.submit(new OFMessageHandler(dpid, msg));
break;
default:
log.warn("Handling message type {} not yet implemented {}",
msg.getType(), msg);
......@@ -164,7 +191,7 @@ public class OpenFlowControllerImpl implements OpenFlowController {
} else {
log.error("Added switch {}", dpid);
connectedSwitches.put(dpid, sw);
for (OpenFlowSwitchListener l : ofEventListener) {
for (OpenFlowSwitchListener l : ofSwitchListener) {
l.switchAdded(dpid);
}
return true;
......@@ -277,7 +304,7 @@ public class OpenFlowControllerImpl implements OpenFlowController {
if (sw == null) {
sw = activeEqualSwitches.remove(dpid);
}
for (OpenFlowSwitchListener l : ofEventListener) {
for (OpenFlowSwitchListener l : ofSwitchListener) {
l.switchRemoved(dpid);
}
}
......@@ -288,5 +315,23 @@ public class OpenFlowControllerImpl implements OpenFlowController {
}
}
private final class OFMessageHandler implements Runnable {
private final OFMessage msg;
private final Dpid dpid;
public OFMessageHandler(Dpid dpid, OFMessage msg) {
this.msg = msg;
this.dpid = dpid;
}
@Override
public void run() {
for (OpenFlowEventListener listener : ofEventListener) {
listener.handleMessage(dpid, msg);
}
}
}
}
......
package org.onlab.onos.provider.of.flow.impl;
import static org.slf4j.LoggerFactory.getLogger;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import org.onlab.onos.net.flow.FlowRule;
import org.onlab.onos.net.flow.TrafficSelector;
import org.onlab.onos.net.flow.TrafficTreatment;
import org.onlab.onos.net.flow.criteria.Criteria.EthCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.EthTypeCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.IPCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.IPProtocolCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.PortCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.VlanIdCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.VlanPcpCriterion;
import org.onlab.onos.net.flow.criteria.Criterion;
import org.onlab.onos.net.flow.instructions.Instruction;
import org.onlab.onos.net.flow.instructions.Instructions.OutputInstruction;
import org.onlab.onos.net.flow.instructions.L2ModificationInstruction;
import org.onlab.onos.net.flow.instructions.L2ModificationInstruction.ModEtherInstruction;
import org.onlab.onos.net.flow.instructions.L2ModificationInstruction.ModVlanIdInstruction;
import org.onlab.onos.net.flow.instructions.L2ModificationInstruction.ModVlanPcpInstruction;
import org.onlab.onos.net.flow.instructions.L3ModificationInstruction;
import org.onlab.onos.net.flow.instructions.L3ModificationInstruction.ModIPInstruction;
import org.projectfloodlight.openflow.protocol.OFFactory;
import org.projectfloodlight.openflow.protocol.OFFlowMod;
import org.projectfloodlight.openflow.protocol.OFFlowModFlags;
import org.projectfloodlight.openflow.protocol.action.OFAction;
import org.projectfloodlight.openflow.protocol.match.Match;
import org.projectfloodlight.openflow.protocol.match.MatchField;
import org.projectfloodlight.openflow.types.EthType;
import org.projectfloodlight.openflow.types.IPv4Address;
import org.projectfloodlight.openflow.types.IpProtocol;
import org.projectfloodlight.openflow.types.MacAddress;
import org.projectfloodlight.openflow.types.Masked;
import org.projectfloodlight.openflow.types.OFBufferId;
import org.projectfloodlight.openflow.types.OFPort;
import org.projectfloodlight.openflow.types.OFVlanVidMatch;
import org.projectfloodlight.openflow.types.VlanPcp;
import org.projectfloodlight.openflow.types.VlanVid;
import org.slf4j.Logger;
public class FlowModBuilder {
private final Logger log = getLogger(getClass());
private final OFFactory factory;
private final TrafficTreatment treatment;
private final TrafficSelector selector;
private final int priority;
public FlowModBuilder(FlowRule flowRule, OFFactory factory) {
this.factory = factory;
this.treatment = flowRule.treatment();
this.selector = flowRule.selector();
this.priority = flowRule.priority();
}
public OFFlowMod buildFlowMod() {
Match match = buildMatch();
List<OFAction> actions = buildActions();
//TODO: what to do without bufferid? do we assume that there will be a pktout as well?
OFFlowMod fm = factory.buildFlowModify()
.setBufferId(OFBufferId.NO_BUFFER)
.setActions(actions)
.setMatch(match)
.setFlags(Collections.singleton(OFFlowModFlags.SEND_FLOW_REM))
.setIdleTimeout(10)
.setHardTimeout(10)
.setPriority(priority)
.build();
return fm;
}
private List<OFAction> buildActions() {
List<OFAction> acts = new LinkedList<>();
for (Instruction i : treatment.instructions()) {
switch (i.type()) {
case DROP:
log.warn("Saw drop action; assigning drop action");
return new LinkedList<>();
case L2MODIFICATION:
acts.add(buildL2Modification(i));
break;
case L3MODIFICATION:
acts.add(buildL3Modification(i));
break;
case OUTPUT:
OutputInstruction out = (OutputInstruction) i;
acts.add(factory.actions().buildOutput().setPort(
OFPort.of((int) out.port().toLong())).build());
break;
case GROUP:
default:
log.warn("Instruction type {} not yet implemented.", i.type());
}
}
return acts;
}
private OFAction buildL3Modification(Instruction i) {
L3ModificationInstruction l3m = (L3ModificationInstruction) i;
ModIPInstruction ip;
switch (l3m.subtype()) {
case L3_DST:
ip = (ModIPInstruction) i;
return factory.actions().setNwDst(IPv4Address.of(ip.ip().toInt()));
case L3_SRC:
ip = (ModIPInstruction) i;
return factory.actions().setNwSrc(IPv4Address.of(ip.ip().toInt()));
default:
log.warn("Unimplemented action type {}.", l3m.subtype());
break;
}
return null;
}
private OFAction buildL2Modification(Instruction i) {
L2ModificationInstruction l2m = (L2ModificationInstruction) i;
ModEtherInstruction eth;
switch (l2m.subtype()) {
case L2_DST:
eth = (ModEtherInstruction) l2m;
return factory.actions().setDlDst(MacAddress.of(eth.mac().toLong()));
case L2_SRC:
eth = (ModEtherInstruction) l2m;
return factory.actions().setDlSrc(MacAddress.of(eth.mac().toLong()));
case VLAN_ID:
ModVlanIdInstruction vlanId = (ModVlanIdInstruction) l2m;
return factory.actions().setVlanVid(VlanVid.ofVlan(vlanId.vlanId.toShort()));
case VLAN_PCP:
ModVlanPcpInstruction vlanPcp = (ModVlanPcpInstruction) l2m;
return factory.actions().setVlanPcp(VlanPcp.of(vlanPcp.vlanPcp()));
default:
log.warn("Unimplemented action type {}.", l2m.subtype());
break;
}
return null;
}
private Match buildMatch() {
Match.Builder mBuilder = factory.buildMatch();
EthCriterion eth;
IPCriterion ip;
for (Criterion c : selector.criteria()) {
switch (c.type()) {
case IN_PORT:
PortCriterion inport = (PortCriterion) c;
mBuilder.setExact(MatchField.IN_PORT, OFPort.of((int) inport.port().toLong()));
break;
case ETH_SRC:
eth = (EthCriterion) c;
mBuilder.setExact(MatchField.ETH_SRC, MacAddress.of(eth.mac().toLong()));
break;
case ETH_DST:
eth = (EthCriterion) c;
mBuilder.setExact(MatchField.ETH_DST, MacAddress.of(eth.mac().toLong()));
break;
case ETH_TYPE:
EthTypeCriterion ethType = (EthTypeCriterion) c;
mBuilder.setExact(MatchField.ETH_TYPE, EthType.of(ethType.ethType()));
break;
case IPV4_DST:
ip = (IPCriterion) c;
if (ip.ip().isMasked()) {
Masked<IPv4Address> maskedIp = Masked.of(IPv4Address.of(ip.ip().toInt()),
IPv4Address.of(ip.ip().netmask().toInt()));
mBuilder.setMasked(MatchField.IPV4_DST, maskedIp);
} else {
mBuilder.setExact(MatchField.IPV4_DST, IPv4Address.of(ip.ip().toInt()));
}
break;
case IPV4_SRC:
ip = (IPCriterion) c;
if (ip.ip().isMasked()) {
Masked<IPv4Address> maskedIp = Masked.of(IPv4Address.of(ip.ip().toInt()),
IPv4Address.of(ip.ip().netmask().toInt()));
mBuilder.setMasked(MatchField.IPV4_SRC, maskedIp);
} else {
mBuilder.setExact(MatchField.IPV4_SRC, IPv4Address.of(ip.ip().toInt()));
}
break;
case IP_PROTO:
IPProtocolCriterion p = (IPProtocolCriterion) c;
mBuilder.setExact(MatchField.IP_PROTO, IpProtocol.of(p.protocol()));
break;
case VLAN_PCP:
VlanPcpCriterion vpcp = (VlanPcpCriterion) c;
mBuilder.setExact(MatchField.VLAN_PCP, VlanPcp.of(vpcp.priority()));
break;
case VLAN_VID:
VlanIdCriterion vid = (VlanIdCriterion) c;
mBuilder.setExact(MatchField.VLAN_VID,
OFVlanVidMatch.ofVlanVid(VlanVid.ofVlan(vid.vlanId().toShort())));
break;
case ARP_OP:
case ARP_SHA:
case ARP_SPA:
case ARP_THA:
case ARP_TPA:
case ICMPV4_CODE:
case ICMPV4_TYPE:
case ICMPV6_CODE:
case ICMPV6_TYPE:
case IN_PHY_PORT:
case IPV6_DST:
case IPV6_EXTHDR:
case IPV6_FLABEL:
case IPV6_ND_SLL:
case IPV6_ND_TARGET:
case IPV6_ND_TLL:
case IPV6_SRC:
case IP_DSCP:
case IP_ECN:
case METADATA:
case MPLS_BOS:
case MPLS_LABEL:
case MPLS_TC:
case PBB_ISID:
case SCTP_DST:
case SCTP_SRC:
case TCP_DST:
case TCP_SRC:
case TUNNEL_ID:
case UDP_DST:
case UDP_SRC:
default:
log.warn("Match type {} not yet implemented.", c.type());
}
}
return mBuilder.build();
}
}
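A minimal usage sketch of the builder, assuming a FlowRule and a connected OpenFlowSwitch are already in hand; FlowModSender is hypothetical and mirrors the one-liner that applyRule is reduced to further below.

import org.onlab.onos.net.flow.FlowRule;
import org.onlab.onos.openflow.controller.OpenFlowSwitch;
import org.projectfloodlight.openflow.protocol.OFFlowMod;

// Hypothetical helper living alongside FlowModBuilder in the same package.
public final class FlowModSender {

    private FlowModSender() {
    }

    // Translates the protocol-agnostic FlowRule into an OFFlowMod using the
    // switch's own OFFactory and pushes it to the switch.
    public static void push(FlowRule rule, OpenFlowSwitch sw) {
        OFFlowMod fm = new FlowModBuilder(rule, sw.factory()).buildFlowMod();
        sw.sendMsg(fm);
    }
}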
package org.onlab.onos.provider.of.flow.impl;
import static org.slf4j.LoggerFactory.getLogger;
import java.util.concurrent.TimeUnit;
import org.jboss.netty.util.HashedWheelTimer;
import org.jboss.netty.util.Timeout;
import org.jboss.netty.util.TimerTask;
import org.onlab.onos.openflow.controller.OpenFlowSwitch;
import org.onlab.util.Timer;
import org.projectfloodlight.openflow.protocol.OFFlowStatsRequest;
import org.projectfloodlight.openflow.types.OFPort;
import org.projectfloodlight.openflow.types.TableId;
import org.slf4j.Logger;
public class FlowStatsCollector implements TimerTask {
private final Logger log = getLogger(getClass());
private final HashedWheelTimer timer = Timer.getTimer();
private final OpenFlowSwitch sw;
private final int refreshInterval;
private Timeout timeout;
private boolean stopTimer = false;
public FlowStatsCollector(OpenFlowSwitch sw, int refreshInterval) {
this.sw = sw;
this.refreshInterval = refreshInterval;
}
@Override
public void run(Timeout timeout) throws Exception {
log.debug("Collecting stats for {}", this.sw.getStringId());
sendFlowStatistics();
if (!this.stopTimer) {
log.debug("Scheduling stats collection in {} seconds for {}",
this.refreshInterval, this.sw.getStringId());
timeout.getTimer().newTimeout(this, refreshInterval,
TimeUnit.SECONDS);
}
}
private void sendFlowStatistics() {
OFFlowStatsRequest request = sw.factory().buildFlowStatsRequest()
.setMatch(sw.factory().matchWildcardAll())
.setTableId(TableId.ALL)
.setOutPort(OFPort.NO_MASK)
.build();
this.sw.sendMsg(request);
}
public void start() {
/*
* Initially start polling quickly. Then drop down to configured value
*/
log.info("Starting Stats collection thread for {}",
this.sw.getStringId());
timeout = timer.newTimeout(this, 1, TimeUnit.SECONDS);
}
public void stop() {
log.info("Stopping Stats collection thread for {}",
this.sw.getStringId());
this.stopTimer = true;
timeout.cancel();
}
}
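A minimal lifecycle sketch for the collector, assuming a connected OpenFlowSwitch; StatsPollingExample and the 5-second interval are hypothetical (the provider below polls each switch every second).

import org.onlab.onos.openflow.controller.OpenFlowSwitch;

// Hypothetical wiring; InternalFlowProvider below does the same per switch.
public class StatsPollingExample {

    private FlowStatsCollector collector;

    // Start polling flow statistics from the switch every 5 seconds;
    // the first request goes out roughly 1 second after start().
    public void startPolling(OpenFlowSwitch sw) {
        collector = new FlowStatsCollector(sw, 5);
        collector.start();
    }

    // Cancel the polling task, e.g. when the switch disconnects.
    public void stopPolling() {
        if (collector != null) {
            collector.stop();
        }
    }
}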
......@@ -2,9 +2,7 @@ package org.onlab.onos.provider.of.flow.impl;
import static org.slf4j.LoggerFactory.getLogger;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import org.apache.felix.scr.annotations.Activate;
import org.apache.felix.scr.annotations.Component;
......@@ -12,50 +10,27 @@ import org.apache.felix.scr.annotations.Deactivate;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.ReferenceCardinality;
import org.onlab.onos.net.DeviceId;
import org.onlab.onos.net.flow.DefaultFlowRule;
import org.onlab.onos.net.flow.FlowEntry;
import org.onlab.onos.net.flow.FlowRule;
import org.onlab.onos.net.flow.FlowRuleProvider;
import org.onlab.onos.net.flow.FlowRuleProviderRegistry;
import org.onlab.onos.net.flow.FlowRuleProviderService;
import org.onlab.onos.net.flow.criteria.Criteria.EthCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.EthTypeCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.IPCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.IPProtocolCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.PortCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.VlanIdCriterion;
import org.onlab.onos.net.flow.criteria.Criteria.VlanPcpCriterion;
import org.onlab.onos.net.flow.criteria.Criterion;
import org.onlab.onos.net.flow.instructions.Instruction;
import org.onlab.onos.net.flow.instructions.Instructions.OutputInstruction;
import org.onlab.onos.net.flow.instructions.L2ModificationInstruction;
import org.onlab.onos.net.flow.instructions.L2ModificationInstruction.ModEtherInstruction;
import org.onlab.onos.net.flow.instructions.L2ModificationInstruction.ModVlanIdInstruction;
import org.onlab.onos.net.flow.instructions.L2ModificationInstruction.ModVlanPcpInstruction;
import org.onlab.onos.net.flow.instructions.L3ModificationInstruction;
import org.onlab.onos.net.flow.instructions.L3ModificationInstruction.ModIPInstruction;
import org.onlab.onos.net.provider.AbstractProvider;
import org.onlab.onos.net.provider.ProviderId;
import org.onlab.onos.net.topology.TopologyService;
import org.onlab.onos.openflow.controller.Dpid;
import org.onlab.onos.openflow.controller.OpenFlowController;
import org.onlab.onos.openflow.controller.OpenFlowEventListener;
import org.onlab.onos.openflow.controller.OpenFlowSwitch;
import org.projectfloodlight.openflow.protocol.OFFactory;
import org.projectfloodlight.openflow.protocol.OFFlowMod;
import org.projectfloodlight.openflow.protocol.OFFlowModFlags;
import org.projectfloodlight.openflow.protocol.action.OFAction;
import org.projectfloodlight.openflow.protocol.match.Match;
import org.projectfloodlight.openflow.protocol.match.MatchField;
import org.projectfloodlight.openflow.types.EthType;
import org.projectfloodlight.openflow.types.IPv4Address;
import org.projectfloodlight.openflow.types.IpProtocol;
import org.projectfloodlight.openflow.types.MacAddress;
import org.projectfloodlight.openflow.types.OFBufferId;
import org.projectfloodlight.openflow.types.OFPort;
import org.projectfloodlight.openflow.types.OFVlanVidMatch;
import org.projectfloodlight.openflow.types.VlanPcp;
import org.projectfloodlight.openflow.types.VlanVid;
import org.onlab.onos.openflow.controller.OpenFlowSwitchListener;
import org.projectfloodlight.openflow.protocol.OFFlowRemoved;
import org.projectfloodlight.openflow.protocol.OFMessage;
import org.projectfloodlight.openflow.protocol.OFPortStatus;
import org.slf4j.Logger;
import com.google.common.collect.Maps;
/**
* Provider which uses an OpenFlow controller to install and track
* flow rules on network devices.
......@@ -76,6 +51,8 @@ public class OpenFlowRuleProvider extends AbstractProvider implements FlowRulePr
private FlowRuleProviderService providerService;
private final InternalFlowProvider listener = new InternalFlowProvider();
/**
* Creates an OpenFlow flow rule provider.
*/
......@@ -86,6 +63,8 @@ public class OpenFlowRuleProvider extends AbstractProvider implements FlowRulePr
@Activate
public void activate() {
providerService = providerRegistry.register(this);
controller.addListener(listener);
controller.addEventListener(listener);
log.info("Started");
}
......@@ -105,168 +84,10 @@ public class OpenFlowRuleProvider extends AbstractProvider implements FlowRulePr
private void applyRule(FlowRule flowRule) {
OpenFlowSwitch sw = controller.getSwitch(Dpid.dpid(flowRule.deviceId().uri()));
Match match = buildMatch(flowRule.selector().criteria(), sw.factory());
List<OFAction> actions =
buildActions(flowRule.treatment().instructions(), sw.factory());
//TODO: what to do without bufferid? do we assume that there will be a pktout as well?
OFFlowMod fm = sw.factory().buildFlowModify()
.setBufferId(OFBufferId.NO_BUFFER)
.setActions(actions)
.setMatch(match)
.setFlags(Collections.singleton(OFFlowModFlags.SEND_FLOW_REM))
.setIdleTimeout(10)
.setHardTimeout(10)
.setPriority(flowRule.priority())
.build();
sw.sendMsg(fm);
}
private List<OFAction> buildActions(List<Instruction> instructions, OFFactory factory) {
List<OFAction> acts = new LinkedList<>();
for (Instruction i : instructions) {
switch (i.type()) {
case DROP:
log.warn("Saw drop action; assigning drop action");
return new LinkedList<>();
case L2MODIFICATION:
acts.add(buildL2Modification(i, factory));
case L3MODIFICATION:
acts.add(buildL3Modification(i, factory));
case OUTPUT:
OutputInstruction out = (OutputInstruction) i;
acts.add(factory.actions().buildOutput().setPort(
OFPort.of((int) out.port().toLong())).build());
break;
case GROUP:
default:
log.warn("Instruction type {} not yet implemented.", i.type());
}
}
return acts;
sw.sendMsg(new FlowModBuilder(flowRule, sw.factory()).buildFlowMod());
}
private OFAction buildL3Modification(Instruction i, OFFactory factory) {
L3ModificationInstruction l3m = (L3ModificationInstruction) i;
ModIPInstruction ip;
switch (l3m.subtype()) {
case L3_DST:
ip = (ModIPInstruction) i;
return factory.actions().setNwDst(IPv4Address.of(ip.ip().toInt()));
case L3_SRC:
ip = (ModIPInstruction) i;
return factory.actions().setNwSrc(IPv4Address.of(ip.ip().toInt()));
default:
log.warn("Unimplemented action type {}.", l3m.subtype());
break;
}
return null;
}
private OFAction buildL2Modification(Instruction i, OFFactory factory) {
L2ModificationInstruction l2m = (L2ModificationInstruction) i;
ModEtherInstruction eth;
switch (l2m.subtype()) {
case L2_DST:
eth = (ModEtherInstruction) l2m;
return factory.actions().setDlDst(MacAddress.of(eth.mac().toLong()));
case L2_SRC:
eth = (ModEtherInstruction) l2m;
return factory.actions().setDlSrc(MacAddress.of(eth.mac().toLong()));
case VLAN_ID:
ModVlanIdInstruction vlanId = (ModVlanIdInstruction) l2m;
return factory.actions().setVlanVid(VlanVid.ofVlan(vlanId.vlanId.toShort()));
case VLAN_PCP:
ModVlanPcpInstruction vlanPcp = (ModVlanPcpInstruction) l2m;
return factory.actions().setVlanPcp(VlanPcp.of(vlanPcp.vlanPcp()));
default:
log.warn("Unimplemented action type {}.", l2m.subtype());
break;
}
return null;
}
private Match buildMatch(List<Criterion> criteria, OFFactory factory) {
Match.Builder mBuilder = factory.buildMatch();
EthCriterion eth;
IPCriterion ip;
for (Criterion c : criteria) {
switch (c.type()) {
case IN_PORT:
PortCriterion inport = (PortCriterion) c;
mBuilder.setExact(MatchField.IN_PORT, OFPort.of((int) inport.port().toLong()));
break;
case ETH_SRC:
eth = (EthCriterion) c;
mBuilder.setExact(MatchField.ETH_SRC, MacAddress.of(eth.mac().toLong()));
break;
case ETH_DST:
eth = (EthCriterion) c;
mBuilder.setExact(MatchField.ETH_DST, MacAddress.of(eth.mac().toLong()));
break;
case ETH_TYPE:
EthTypeCriterion ethType = (EthTypeCriterion) c;
mBuilder.setExact(MatchField.ETH_TYPE, EthType.of(ethType.ethType()));
break;
case IPV4_DST:
ip = (IPCriterion) c;
mBuilder.setExact(MatchField.IPV4_DST, IPv4Address.of(ip.ip().toInt()));
break;
case IPV4_SRC:
ip = (IPCriterion) c;
mBuilder.setExact(MatchField.IPV4_SRC, IPv4Address.of(ip.ip().toInt()));
break;
case IP_PROTO:
IPProtocolCriterion p = (IPProtocolCriterion) c;
mBuilder.setExact(MatchField.IP_PROTO, IpProtocol.of(p.protocol()));
break;
case VLAN_PCP:
VlanPcpCriterion vpcp = (VlanPcpCriterion) c;
mBuilder.setExact(MatchField.VLAN_PCP, VlanPcp.of(vpcp.priority()));
break;
case VLAN_VID:
VlanIdCriterion vid = (VlanIdCriterion) c;
mBuilder.setExact(MatchField.VLAN_VID,
OFVlanVidMatch.ofVlanVid(VlanVid.ofVlan(vid.vlanId().toShort())));
break;
case ARP_OP:
case ARP_SHA:
case ARP_SPA:
case ARP_THA:
case ARP_TPA:
case ICMPV4_CODE:
case ICMPV4_TYPE:
case ICMPV6_CODE:
case ICMPV6_TYPE:
case IN_PHY_PORT:
case IPV6_DST:
case IPV6_EXTHDR:
case IPV6_FLABEL:
case IPV6_ND_SLL:
case IPV6_ND_TARGET:
case IPV6_ND_TLL:
case IPV6_SRC:
case IP_DSCP:
case IP_ECN:
case METADATA:
case MPLS_BOS:
case MPLS_LABEL:
case MPLS_TC:
case PBB_ISID:
case SCTP_DST:
case SCTP_SRC:
case TCP_DST:
case TCP_SRC:
case TUNNEL_ID:
case UDP_DST:
case UDP_SRC:
default:
log.warn("Action type {} not yet implemented.", c.type());
}
}
return mBuilder.build();
}
@Override
public void removeFlowRule(FlowRule... flowRules) {
......@@ -283,6 +104,49 @@ public class OpenFlowRuleProvider extends AbstractProvider implements FlowRulePr
//TODO: InternalFlowRuleProvider listening to stats and error and flowremoved.
// possibly barriers as well. May not be internal at all...
private class InternalFlowProvider
implements OpenFlowSwitchListener, OpenFlowEventListener {
private final Map<Dpid, FlowStatsCollector> collectors = Maps.newHashMap();
@Override
public void switchAdded(Dpid dpid) {
FlowStatsCollector fsc = new FlowStatsCollector(controller.getSwitch(dpid), 1);
fsc.start();
collectors.put(dpid, fsc);
}
@Override
public void switchRemoved(Dpid dpid) {
collectors.remove(dpid).stop();
}
@Override
public void portChanged(Dpid dpid, OFPortStatus status) {
//TODO: Decide whether to evict flows from the internal store.
}
@Override
public void handleMessage(Dpid dpid, OFMessage msg) {
switch (msg.getType()) {
case FLOW_REMOVED:
//TODO: make this better
OFFlowRemoved removed = (OFFlowRemoved) msg;
FlowRule fr = new DefaultFlowRule(DeviceId.deviceId(Dpid.uri(dpid)), null, null);
providerService.flowRemoved(fr);
break;
case STATS_REPLY:
break;
case BARRIER_REPLY:
case ERROR:
default:
log.warn("Unhandled message type: {}", msg.getType());
}
}
}
}
......
......@@ -46,7 +46,7 @@ public class OpenFlowCorePacketContext extends DefaultPacketContext {
private void sendBufferedPacket() {
List<Instruction> ins = treatmentBuilder().build().instructions();
OFPort p = null;
//TODO: support arbitrary list of treatments
//TODO: support arbitrary list of treatments; must be supported in ofPacketContext
for (Instruction i : ins) {
if (i.type() == Type.OUTPUT) {
p = buildPort(((OutputInstruction) i).port());
......
# Environmental defaults for ONOS build, package and test
# Root of the ONOS source tree
export ONOS_ROOT=${ONOS_ROOT:-~/onos-next}
# M2 repository and Karaf gold bits
export M2_REPO=${M2_REPO:-~/.m2/repository}
export KARAF_ZIP=${KARAF_ZIP:-~/Downloads/apache-karaf-3.0.1.zip}
export KARAF_DIST=$(basename $KARAF_ZIP .zip)
# ONOS Version and onos.tar.gz staging environment
export ONOS_VERSION=${ONOS_VERSION:-1.0.0-SNAPSHOT}
export ONOS_STAGE_ROOT=${ONOS_STAGE_ROOT:-/tmp}
export ONOS_BITS=onos-$ONOS_VERSION
export ONOS_STAGE=$ONOS_STAGE_ROOT/$ONOS_BITS
export ONOS_TAR=$ONOS_STAGE.tar.gz
# Defaults for ONOS testing using remote machines.
export ONOS_INSTALL_DIR="/opt/onos" # Installation directory on remote
export OCI="${OCI:-192.168.56.101}" # ONOS Controller Instance
export ONOS_USER="sdn" # ONOS user on remote system
export ONOS_PWD="rocks" # ONOS user password on remote system
......@@ -3,14 +3,8 @@
# Packages ONOS distributable into onos.tar.gz
#-------------------------------------------------------------------------------
export M2_REPO=${M2_REPO:-~/.m2/repository}
export KARAF_ZIP=${KARAF_ZIP:-~/Downloads/apache-karaf-3.0.1.zip}
export KARAF_DIST=$(basename $KARAF_ZIP .zip)
export ONOS_VERSION=${ONOS_VERSION:-1.0.0-SNAPSHOT}
export ONOS_STAGE_ROOT=${ONOS_STAGE_ROOT:-/tmp}
export ONOS_BITS=onos-$ONOS_VERSION
export ONOS_STAGE=$ONOS_STAGE_ROOT/$ONOS_BITS
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
. $ONOS_ROOT/tools/build/envDefaults
# Bail on any errors
set -e
......@@ -26,29 +20,31 @@ rm -fr $ONOS_STAGE # Remove this when package script is completed
mkdir -p $ONOS_STAGE
cd $ONOS_STAGE
# Unroll the Apache Karaf bits and make the ONOS top-level directories.
unzip $KARAF_ZIP
# Unroll the Apache Karaf bits, prune them and make ONOS top-level directories.
unzip -q $KARAF_ZIP && rm -rf $KARAF_DIST/demos
mkdir bin
# Stage the ONOS admin scripts
# Stage the ONOS admin scripts and patch in Karaf service wrapper extras
cp -r $ONOS_ROOT/tools/package/bin .
cp -r $ONOS_ROOT/tools/package/wrapper/* $KARAF_DIST
# Stage the ONOS bundles
mkdir -p system/org/onlab
cp -r $M2_REPO/org/onlab system/org/
mkdir -p $KARAF_DIST/system/org/onlab
cp -r $M2_REPO/org/onlab $KARAF_DIST/system/org/
# Patch the Apache Karaf distribution file to add ONOS features repository
perl -pi.old -e "s|^(featuresRepositories=.*)|\1,mvn:org.onlab.onos/onos-features/$ONOS_VERSION/xml/features|" \
$ONOS_STAGE/$KARAF_DIST/etc/org.apache.karaf.features.cfg
# Patch the Apache Karaf distribution file to load ONOS features
perl -pi.old -e 's|^(featuresBoot=.*)|\1,onos-api,onos-core,onos-cli,onos-rest,onos-gui,onos-openflow,onos-app-tvue|' \
/tmp/onos-1.0.0-SNAPSHOT/apache-karaf-3.0.1/etc/org.apache.karaf.features.cfg
perl -pi.old -e 's|^(featuresBoot=.*)|\1,wrapper,onos-api,onos-core,onos-cli,onos-rest,onos-gui,onos-openflow,onos-app-tvue|' \
$ONOS_STAGE/$KARAF_DIST/etc/org.apache.karaf.features.cfg
# Patch the Apache Karaf distribution with ONOS branding bundle
cp $M2_REPO/org/onlab/onos/onos-branding/$ONOS_VERSION/onos-branding-*.jar \
$ONOS_STAGE/apache-karaf-*/lib
$ONOS_STAGE/$KARAF_DIST/lib
# Now package up the ONOS tar file
cd $ONOS_STAGE_ROOT
tar zcf $ONOS_BITS.tar.gz $ONOS_BITS
COPYFILE_DISABLE=1 tar zcf $ONOS_TAR $ONOS_BITS
ls -l $ONOS_TAR >&2
......
......@@ -12,7 +12,8 @@ export KARAF_LOG=$KARAF/data/log/karaf.log
# Setup a path
export PS=":"
export PATH="$PATH:$ONOS_ROOT/tools/dev:$ONOS_ROOT/tools/package"
export PATH="$PATH:$ONOS_ROOT/tools/dev:$ONOS_ROOT/tools/build"
export PATH="$PATH:$ONOS_ROOT/tools/test/bin"
export PATH="$PATH:$MAVEN/bin:$KARAF/bin"
export PATH="$PATH:."
......@@ -39,3 +40,13 @@ alias pp='python -m json.tool'
# Short-hand to launch API docs and sample topology viewer GUI
alias docs='open $ONOS_ROOT/target/site/apidocs/index.html'
alias gui='open http://localhost:8181/onos/tvue'
# Miscellaneous
function spy {
ps -ef | egrep "$@" | grep -v egrep
}
function nuke {
spy | cut -c7-11 | xargs kill
}
......
Artifacts for packaging onos.tar.gz.
#!/bin/bash
#-------------------------------------------------------------------------------
# ONOS command-line client
#-------------------------------------------------------------------------------
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64/
cd $(dirname $0)/../apache-karaf-*/bin
./client -h localhost "$@"
......@@ -3,6 +3,8 @@
# Starts ONOS Apache Karaf container
#-------------------------------------------------------------------------------
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64/
cd $(dirname $0)/../apache-karaf-*/bin
./karaf "$@"
......
#! /bin/sh
# ------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
# If required, set the JAVA_HOME to launch the wrapper
#
#JAVA_HOME=
#
# Application
APP_NAME="onos"
APP_LONG_NAME="onos"
# Wrapper
WRAPPER_CMD="/opt/onos/apache-karaf-3.0.1/bin/${APP_NAME}-wrapper"
WRAPPER_CONF="/opt/onos/apache-karaf-3.0.1/etc/${APP_NAME}-wrapper.conf"
# Priority at which to run the wrapper. See "man nice" for valid priorities.
# nice is only used if a priority is specified.
PRIORITY=
# Location of the data folder.
DATADIR="/opt/onos/apache-karaf-3.0.1/data"
# Location of the pid file.
PIDDIR="/opt/onos/apache-karaf-3.0.1/data"
# If uncommented, causes the Wrapper to be shutdown using an anchor file.
# When launched with the 'start' command, it will also ignore all INT and
# TERM signals.
#IGNORE_SIGNALS=true
# If specified, the Wrapper will be run as the specified user.
# IMPORTANT - Make sure that the user has the required privileges to write
# the PID file and wrapper.log files. Failure to be able to write the log
# file will cause the Wrapper to exit without any way to write out an error
# message.
# NOTE - This will set the user which is used to run the Wrapper as well as
# the JVM and is not useful in situations where a privileged resource or
# port needs to be allocated prior to the user being changed.
#RUN_AS_USER=
# The following two lines are used by the chkconfig command. Change as is
# appropriate for your application. They should remain commented.
# chkconfig: 2345 20 80
# description: onos
# Do not modify anything beyond this point
#-----------------------------------------------------------------------------
# Get the fully qualified path to the script
case $0 in
/*)
SCRIPT="$0"
;;
*)
PWD=`pwd`
SCRIPT="$PWD/$0"
;;
esac
# Resolve the true real path without any sym links.
CHANGED=true
while [ "X$CHANGED" != "X" ]
do
# Change spaces to ":" so the tokens can be parsed.
SCRIPT=`echo $SCRIPT | sed -e 's; ;:;g'`
# Get the real path to this script, resolving any symbolic links
TOKENS=`echo $SCRIPT | sed -e 's;/; ;g'`
REALPATH=
for C in $TOKENS; do
REALPATH="$REALPATH/$C"
while [ -h "$REALPATH" ] ; do
LS="`ls -ld "$REALPATH"`"
LINK="`expr "$LS" : '.*-> \(.*\)$'`"
if expr "$LINK" : '/.*' > /dev/null; then
REALPATH="$LINK"
else
REALPATH="`dirname "$REALPATH"`""/$LINK"
fi
done
done
# Change ":" chars back to spaces.
REALPATH=`echo $REALPATH | sed -e 's;:; ;g'`
if [ "$REALPATH" = "$SCRIPT" ]
then
CHANGED=""
else
SCRIPT="$REALPATH"
fi
done
# Change the current directory to the location of the script
cd "`dirname "$REALPATH"`"
REALDIR=`pwd`
# If the PIDDIR is relative, set its value relative to the full REALPATH to avoid problems if
# the working directory is later changed.
FIRST_CHAR=`echo $PIDDIR | cut -c1,1`
if [ "$FIRST_CHAR" != "/" ]
then
PIDDIR=$REALDIR/$PIDDIR
fi
# Same test for WRAPPER_CMD
FIRST_CHAR=`echo $WRAPPER_CMD | cut -c1,1`
if [ "$FIRST_CHAR" != "/" ]
then
WRAPPER_CMD=$REALDIR/$WRAPPER_CMD
fi
# Same test for WRAPPER_CONF
FIRST_CHAR=`echo $WRAPPER_CONF | cut -c1,1`
if [ "$FIRST_CHAR" != "/" ]
then
WRAPPER_CONF=$REALDIR/$WRAPPER_CONF
fi
# Process ID
ANCHORFILE="$PIDDIR/$APP_NAME.anchor"
PIDFILE="$PIDDIR/$APP_NAME.pid"
LOCKDIR="/var/lock/subsys"
LOCKFILE="$LOCKDIR/$APP_NAME"
pid=""
# Resolve the location of the 'ps' command
PSEXE="/usr/bin/ps"
if [ ! -x $PSEXE ]
then
PSEXE="/bin/ps"
if [ ! -x $PSEXE ]
then
echo "Unable to locate 'ps'."
echo "Please report this message along with the location of the command on your system."
exit 1
fi
fi
# Resolve the os
DIST_OS=`uname -s | tr [:upper:] [:lower:] | tr -d [:blank:]`
case "$DIST_OS" in
'sunos')
DIST_OS="solaris"
;;
'hp-ux' | 'hp-ux64')
DIST_OS="hpux"
;;
'darwin')
DIST_OS="macosx"
;;
'unix_sv')
DIST_OS="unixware"
;;
esac
# Resolve the architecture
DIST_ARCH=`uname -p | tr [:upper:] [:lower:] | tr -d [:blank:]`
if [ "$DIST_ARCH" = "unknown" ]
then
DIST_ARCH=`uname -m | tr [:upper:] [:lower:] | tr -d [:blank:]`
fi
case "$DIST_ARCH" in
'amd64' | 'ia32' | 'ia64' | 'i386' | 'i486' | 'i586' | 'i686' | 'x86_64')
DIST_ARCH="x86"
;;
'ip27')
DIST_ARCH="mips"
;;
'power' | 'powerpc' | 'power_pc' | 'ppc64')
DIST_ARCH="ppc"
;;
'pa_risc' | 'pa-risc')
DIST_ARCH="parisc"
;;
'sun4u' | 'sparcv9')
DIST_ARCH="sparc"
;;
'9000/800')
DIST_ARCH="parisc"
;;
esac
# Decide on the wrapper binary to use.
# If a 32-bit wrapper binary exists then it will work on 32 or 64 bit
# platforms, if the 64-bit binary exists then the distribution most
# likely wants to use long names. Otherwise, look for the default.
# For macosx, we also want to look for universal binaries.
WRAPPER_TEST_CMD="$WRAPPER_CMD-$DIST_OS-$DIST_ARCH-32"
if [ -x $WRAPPER_TEST_CMD ]
then
WRAPPER_CMD="$WRAPPER_TEST_CMD"
else
if [ "$DIST_OS" = "macosx" ]
then
WRAPPER_TEST_CMD="$WRAPPER_CMD-$DIST_OS-universal-32"
if [ -x $WRAPPER_TEST_CMD ]
then
WRAPPER_CMD="$WRAPPER_TEST_CMD"
else
WRAPPER_TEST_CMD="$WRAPPER_CMD-$DIST_OS-$DIST_ARCH-64"
if [ -x $WRAPPER_TEST_CMD ]
then
WRAPPER_CMD="$WRAPPER_TEST_CMD"
else
WRAPPER_TEST_CMD="$WRAPPER_CMD-$DIST_OS-universal-64"
if [ -x $WRAPPER_TEST_CMD ]
then
WRAPPER_CMD="$WRAPPER_TEST_CMD"
else
if [ ! -x $WRAPPER_CMD ]
then
echo "Unable to locate any of the following binaries:"
echo " $WRAPPER_CMD-$DIST_OS-$DIST_ARCH-32"
echo " $WRAPPER_CMD-$DIST_OS-universal-32"
echo " $WRAPPER_CMD-$DIST_OS-$DIST_ARCH-64"
echo " $WRAPPER_CMD-$DIST_OS-universal-64"
echo " $WRAPPER_CMD"
exit 1
fi
fi
fi
fi
else
WRAPPER_TEST_CMD="$WRAPPER_CMD-$DIST_OS-$DIST_ARCH-64"
if [ -x $WRAPPER_TEST_CMD ]
then
WRAPPER_CMD="$WRAPPER_TEST_CMD"
else
if [ ! -x $WRAPPER_CMD ]
then
echo "Unable to locate any of the following binaries:"
echo " $WRAPPER_CMD-$DIST_OS-$DIST_ARCH-32"
echo " $WRAPPER_CMD-$DIST_OS-$DIST_ARCH-64"
echo " $WRAPPER_CMD"
exit 1
fi
fi
fi
fi
# Build the nice clause
if [ "X$PRIORITY" = "X" ]
then
CMDNICE=""
else
CMDNICE="nice -$PRIORITY"
fi
# Build the anchor file clause.
if [ "X$IGNORE_SIGNALS" = "X" ]
then
ANCHORPROP=
IGNOREPROP=
else
ANCHORPROP=wrapper.anchorfile=$ANCHORFILE
IGNOREPROP=wrapper.ignore_signals=TRUE
fi
# Build the lock file clause. Only create a lock file if the lock directory exists on this platform.
if [ -d $LOCKDIR ]
then
LOCKPROP=wrapper.lockfile=$LOCKFILE
else
LOCKPROP=
fi
checkUser() {
# Check the configured user. If necessary rerun this script as the desired user.
if [ "X$RUN_AS_USER" != "X" ]
then
# Resolve the location of the 'id' command
IDEXE="/usr/xpg4/bin/id"
if [ ! -x $IDEXE ]
then
IDEXE="/usr/bin/id"
if [ ! -x $IDEXE ]
then
echo "Unable to locate 'id'."
echo "Please report this message along with the location of the command on your system."
exit 1
fi
fi
if [ "`$IDEXE -u -n`" = "$RUN_AS_USER" ]
then
# Already running as the configured user. Avoid password prompts by not calling su.
RUN_AS_USER=""
fi
fi
if [ "X$RUN_AS_USER" != "X" ]
then
# If LOCKPROP and $RUN_AS_USER are defined then the new user will most likely not be
# able to create the lock file. The Wrapper will be able to update this file once it
# is created but will not be able to delete it on shutdown. If $2 is defined then
# the lock file should be created for the current command
if [ "X$LOCKPROP" != "X" ]
then
if [ "X$2" != "X" ]
then
# Resolve the primary group
RUN_AS_GROUP=`groups $RUN_AS_USER | awk '{print $3}' | tail -1`
if [ "X$RUN_AS_GROUP" = "X" ]
then
RUN_AS_GROUP=$RUN_AS_USER
fi
touch $LOCKFILE
chown $RUN_AS_USER:$RUN_AS_GROUP $LOCKFILE
fi
fi
# Still want to change users, recurse. This means that the user will only be
# prompted for a password once.
su -m $RUN_AS_USER -s /bin/sh -c "$REALPATH $1"
RETVAL=$?
# Now that we are the original user again, we may need to clean up the lock file.
if [ "X$LOCKPROP" != "X" ]
then
getpid
if [ "X$pid" = "X" ]
then
# Wrapper is not running so make sure the lock file is deleted.
if [ -f $LOCKFILE ]
then
rm $LOCKFILE
fi
fi
fi
exit $RETVAL
fi
}
getpid() {
if [ -f $PIDFILE ]
then
if [ -r $PIDFILE ]
then
pid=`cat $PIDFILE`
if [ "X$pid" != "X" ]
then
# It is possible that 'a' process with the pid exists but that it is not the
# correct process. This can happen in a number of cases, but the most
# common is during system startup after an unclean shutdown.
# The ps statement below looks for the specific wrapper command running as
# the pid. If it is not found then the pid file is considered to be stale.
if [ "$DIST_OS" = "solaris" ]
then
pidtest=`$PSEXE -p $pid -o comm | grep $WRAPPER_CMD | tail -1`
else
pidtest=`$PSEXE -p $pid -o command | grep $WRAPPER_CMD | tail -1`
fi
if [ "X$pidtest" = "X" ]
then
# This is a stale pid file.
rm -f $PIDFILE
echo "Removed stale pid file: $PIDFILE"
pid=""
fi
fi
else
echo "Cannot read $PIDFILE."
exit 1
fi
fi
}
testpid() {
pid=`$PSEXE -p $pid | grep $pid | grep -v grep | awk '{print $1}' | tail -1`
if [ "X$pid" = "X" ]
then
# Process is gone so remove the pid file.
rm -f $PIDFILE
pid=""
fi
}
console() {
echo "Running $APP_LONG_NAME..."
getpid
if [ "X$pid" = "X" ]
then
COMMAND_LINE="$CMDNICE $WRAPPER_CMD $WRAPPER_CONF wrapper.syslog.ident=$APP_NAME wrapper.pidfile=$PIDFILE $ANCHORPROP $LOCKPROP"
exec $COMMAND_LINE
else
echo "$APP_LONG_NAME is already running."
exit 1
fi
}
start() {
echo "Starting $APP_LONG_NAME..."
getpid
if [ "X$pid" = "X" ]
then
if [ ! -d $DATADIR ]; then
mkdir $DATADIR
fi
if [ ! -d $DATADIR/log ]; then
mkdir $DATADIR/log
fi
COMMAND_LINE="$CMDNICE $WRAPPER_CMD $WRAPPER_CONF wrapper.syslog.ident=$APP_NAME wrapper.pidfile=$PIDFILE wrapper.daemonize=TRUE $ANCHORPROP $IGNOREPROP $LOCKPROP"
exec $COMMAND_LINE
else
echo "$APP_LONG_NAME is already running."
exit 1
fi
}
stopit() {
echo "Stopping $APP_LONG_NAME..."
getpid
if [ "X$pid" = "X" ]
then
echo "$APP_LONG_NAME was not running."
else
if [ "X$IGNORE_SIGNALS" = "X" ]
then
# Running so try to stop it.
kill $pid
if [ $? -ne 0 ]
then
# An explanation for the failure should have been given
echo "Unable to stop $APP_LONG_NAME."
exit 1
fi
else
rm -f $ANCHORFILE
if [ -f $ANCHORFILE ]
then
# An explanation for the failure should have been given
echo "Unable to stop $APP_LONG_NAME."
exit 1
fi
fi
# We can not predict how long it will take for the wrapper to
# actually stop as it depends on settings in wrapper.conf.
# Loop until it does.
savepid=$pid
CNT=0
TOTCNT=0
while [ "X$pid" != "X" ]
do
# Show a waiting message every 5 seconds.
if [ "$CNT" -lt "5" ]
then
CNT=`expr $CNT + 1`
else
echo "Waiting for $APP_LONG_NAME to exit..."
CNT=0
fi
TOTCNT=`expr $TOTCNT + 1`
sleep 1
testpid
done
pid=$savepid
testpid
if [ "X$pid" != "X" ]
then
echo "Failed to stop $APP_LONG_NAME."
exit 1
else
echo "Stopped $APP_LONG_NAME."
fi
fi
}
status() {
getpid
if [ "X$pid" = "X" ]
then
echo "$APP_LONG_NAME is not running."
exit 1
else
echo "$APP_LONG_NAME is running ($pid)."
exit 0
fi
}
dump() {
echo "Dumping $APP_LONG_NAME..."
getpid
if [ "X$pid" = "X" ]
then
echo "$APP_LONG_NAME was not running."
else
kill -3 $pid
if [ $? -ne 0 ]
then
echo "Failed to dump $APP_LONG_NAME."
exit 1
else
echo "Dumped $APP_LONG_NAME."
fi
fi
}
case "$1" in
'console')
checkUser $1 touchlock
console
;;
'start')
checkUser $1 touchlock
start
;;
'stop')
checkUser $1
stopit
;;
'restart')
checkUser $1 touchlock
stopit
start
;;
'status')
checkUser $1
status
;;
'dump')
checkUser $1
dump
;;
*)
echo "Usage: $0 { console | start | stop | restart | status | dump }"
exit 1
;;
esac
exit 0
# ------------------------------------------------------------------------
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
#********************************************************************
# Wrapper Properties
#********************************************************************
set.default.JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64/
set.default.KARAF_HOME=/opt/onos/apache-karaf-3.0.1
set.default.KARAF_BASE=/opt/onos/apache-karaf-3.0.1
set.default.KARAF_DATA=/opt/onos/apache-karaf-3.0.1/data
set.default.KARAF_ETC=/opt/onos/apache-karaf-3.0.1/etc
# Java Application
wrapper.working.dir=%KARAF_BASE%
wrapper.java.command=%JAVA_HOME%/bin/java
wrapper.java.mainclass=org.apache.karaf.wrapper.internal.Main
wrapper.java.classpath.1=%KARAF_HOME%/lib/karaf-wrapper.jar
wrapper.java.classpath.2=%KARAF_HOME%/lib/karaf.jar
wrapper.java.classpath.3=%KARAF_HOME%/lib/karaf-jmx-boot.jar
wrapper.java.classpath.4=%KARAF_HOME%/lib/karaf-jaas-boot.jar
wrapper.java.classpath.5=%KARAF_HOME%/lib/karaf-wrapper-main.jar
wrapper.java.classpath.6=%KARAF_HOME%/lib/karaf-org.osgi.core.jar
wrapper.java.library.path.1=%KARAF_HOME%/lib/
# Application Parameters. Add parameters as needed starting from 1
#wrapper.app.parameter.1=
# JVM Parameters
# note that n is the parameter number starting from 1.
wrapper.java.additional.1=-Dkaraf.home=%KARAF_HOME%
wrapper.java.additional.2=-Dkaraf.base=%KARAF_BASE%
wrapper.java.additional.3=-Dkaraf.data=%KARAF_DATA%
wrapper.java.additional.4=-Dkaraf.etc=%KARAF_ETC%
wrapper.java.additional.5=-Dcom.sun.management.jmxremote
wrapper.java.additional.6=-Djavax.management.builder.initial=org.apache.karaf.management.boot.KarafMBeanServerBuilder
wrapper.java.additional.7=-Dkaraf.startLocalConsole=false
wrapper.java.additional.8=-Dkaraf.startRemoteShell=true
wrapper.java.additional.9=-Djava.endorsed.dirs=%JAVA_HOME%/jre/lib/endorsed:%JAVA_HOME%/lib/endorsed:%KARAF_HOME%/lib/endorsed
wrapper.java.additional.10=-Djava.ext.dirs=%JAVA_HOME%/jre/lib/ext:%JAVA_HOME%/lib/ext:%KARAF_HOME%/lib/ext
# Uncomment to enable jmx
#wrapper.java.additional.n=-Dcom.sun.management.jmxremote.port=1616
#wrapper.java.additional.n=-Dcom.sun.management.jmxremote.authenticate=false
#wrapper.java.additional.n=-Dcom.sun.management.jmxremote.ssl=false
# Uncomment to enable YourKit profiling
#wrapper.java.additional.n=-Xrunyjpagent
# Uncomment to enable remote debugging
#wrapper.java.additional.n=-Xdebug -Xnoagent -Djava.compiler=NONE
#wrapper.java.additional.n=-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005
# Initial Java Heap Size (in MB)
#wrapper.java.initmemory=3
# Maximum Java Heap Size (in MB)
wrapper.java.maxmemory=512
#********************************************************************
# Wrapper Logging Properties
#********************************************************************
# Format of output for the console. (See docs for formats)
wrapper.console.format=PM
# Log Level for console output. (See docs for log levels)
wrapper.console.loglevel=INFO
# Log file to use for wrapper output logging.
wrapper.logfile=%KARAF_DATA%/log/wrapper.log
# Format of output for the log file. (See docs for formats)
wrapper.logfile.format=LPTM
# Log Level for log file output. (See docs for log levels)
wrapper.logfile.loglevel=INFO
# Maximum size that the log file will be allowed to grow to before
# the log is rolled. Size is specified in bytes. The default value
# of 0, disables log rolling. May abbreviate with the 'k' (kb) or
# 'm' (mb) suffix. For example: 10m = 10 megabytes.
wrapper.logfile.maxsize=10m
# Maximum number of rolled log files which will be allowed before old
# files are deleted. The default value of 0 implies no limit.
wrapper.logfile.maxfiles=5
# Log Level for sys/event log output. (See docs for log levels)
wrapper.syslog.loglevel=NONE
#********************************************************************
# Wrapper Windows Properties
#********************************************************************
# Title to use when running as a console
wrapper.console.title=onos
#********************************************************************
# Wrapper Windows NT/2000/XP Service Properties
#********************************************************************
# WARNING - Do not modify any of these properties when an application
# using this configuration file has been installed as a service.
# Please uninstall the service before modifying this section. The
# service can then be reinstalled.
# Name of the service
wrapper.ntservice.name=onos
# Display name of the service
wrapper.ntservice.displayname=onos
# Description of the service
wrapper.ntservice.description=ONOS
# Service dependencies. Add dependencies as needed starting from 1
wrapper.ntservice.dependency.1=
# Mode in which the service is installed. AUTO_START or DEMAND_START
wrapper.ntservice.starttype=AUTO_START
# Allow the service to interact with the desktop.
wrapper.ntservice.interactive=false
Artifacts for system testing ONOS.
#!/bin/bash
#-------------------------------------------------------------------------------
# Remotely pushes bits to a remote machine and install & starts ONOS.
#-------------------------------------------------------------------------------
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
. $ONOS_ROOT/tools/build/envDefaults
# If the first option is -f attempt uninstall first.
[ "$1" = "-f" ] && shift && onos-uninstall ${1:-$OCI}
remote=$ONOS_USER@${1:-$OCI}
scp -q $ONOS_TAR $remote:/tmp
ssh $remote "
[ -d $ONOS_INSTALL_DIR/bin ] && echo \"ONOS is already installed\" && exit 1
sudo mkdir -p $ONOS_INSTALL_DIR && sudo chown sdn:sdn $ONOS_INSTALL_DIR
tar zxmf /tmp/$ONOS_BITS.tar.gz -C $ONOS_INSTALL_DIR --strip-components=1
ln -s /opt/onos/$KARAF_DIST/data/log /opt/onos/log
"
#!/bin/bash
#-------------------------------------------------------------------------------
# Remotely stops & uninstalls ONOS.
#-------------------------------------------------------------------------------
[ ! -d "$ONOS_ROOT" ] && echo "ONOS_ROOT is not defined" >&2 && exit 1
. $ONOS_ROOT/tools/build/envDefaults
remote=$ONOS_USER@${1:-$OCI}
ssh $remote "
[ -f $ONOS_INSTALL_DIR/bin/onos ] && \
$ONOS_INSTALL_DIR/bin/onos halt 2>/dev/null
sudo rm -fr $ONOS_INSTALL_DIR
"
......@@ -122,7 +122,7 @@ public final class IpAddress {
if (mask > MAX_INET_MASK) {
throw new IllegalArgumentException(
"Value of subnet mask cannot exceed "
+ MAX_INET_MASK);
+ MAX_INET_MASK);
}
}
......@@ -204,7 +204,7 @@ public final class IpAddress {
byte [] net = new byte [4];
byte [] mask = bytes(mask());
for (int i = 0; i < INET_LEN; i++) {
net[i] = (byte) (octets[i] & mask[i]);
net[i] = (byte) (octets[i] & mask[i]);
}
return new IpAddress(version, net, netmask);
}
......@@ -225,11 +225,15 @@ public final class IpAddress {
byte [] host = new byte [INET_LEN];
byte [] mask = bytes(mask());
for (int i = 0; i < INET_LEN; i++) {
host[i] = (byte) (octets[i] & ~mask[i]);
host[i] = (byte) (octets[i] & ~mask[i]);
}
return new IpAddress(version, host, netmask);
}
public boolean isMasked() {
return mask() != 0;
}
@Override
public int hashCode() {
final int prime = 31;
......