[ONOS-7285][ONOS-7263] VLAN support by fabric.p4

Change-Id: I9ea460bca2698eb74f0d4988830a1e7cc7bc2768
This commit is contained in:
Yi Tseng 2018-05-24 23:27:39 +08:00
parent 4df657b4e7
commit 20f9e7b406
17 changed files with 1864 additions and 818 deletions

View File

@ -59,6 +59,8 @@ public final class FabricConstants {
buildPiMatchField(ETHERNET, "src_addr", true);
public static final PiMatchFieldId HF_ICMP_ICMP_TYPE_ID =
buildPiMatchField(ICMP, "icmp_type", true);
public static final PiMatchFieldId HF_STANDARD_METADATA_EGRESS_PORT_ID =
buildPiMatchField(STANDARD_METADATA, "egress_port", false);
public static final PiMatchFieldId HF_FABRIC_METADATA_NEXT_ID_ID =
buildPiMatchField(FABRIC_METADATA, "next_id", false);
public static final PiMatchFieldId HF_FABRIC_METADATA_L4_DST_PORT_ID =
@ -91,6 +93,8 @@ public final class FabricConstants {
PiTableId.of("FabricIngress.forwarding.acl");
public static final PiTableId TBL_HASHED_ID =
PiTableId.of("FabricIngress.next.hashed");
public static final PiTableId TBL_EGRESS_VLAN_ID =
PiTableId.of("FabricEgress.egress_next.egress_vlan");
public static final PiTableId TBL_MPLS_ID =
PiTableId.of("FabricIngress.forwarding.mpls");
public static final PiTableId TBL_MULTICAST_ID =
@ -105,12 +109,14 @@ public final class FabricConstants {
PiTableId.of("FabricIngress.filtering.fwd_classifier");
public static final PiTableId TBL_BRIDGING_ID =
PiTableId.of("FabricIngress.forwarding.bridging");
public static final PiTableId TBL_SIMPLE_ID =
PiTableId.of("FabricIngress.next.simple");
public static final PiTableId TBL_INGRESS_PORT_VLAN_ID =
PiTableId.of("FabricIngress.filtering.ingress_port_vlan");
public static final PiTableId TBL_UNICAST_V6_ID =
PiTableId.of("FabricIngress.forwarding.unicast_v6");
public static final PiTableId TBL_SIMPLE_ID =
PiTableId.of("FabricIngress.next.simple");
public static final PiTableId TBL_VLAN_META_ID =
PiTableId.of("FabricIngress.next.vlan_meta");
// Indirect Counter IDs
public static final PiCounterId CNT_EGRESS_PORT_COUNTER_ID =
@ -123,8 +129,8 @@ public final class FabricConstants {
PiCounterId.of("FabricIngress.forwarding.acl_counter");
public static final PiCounterId CNT_MULTICAST_COUNTER_ID =
PiCounterId.of("FabricIngress.next.multicast_counter");
public static final PiCounterId CNT_SIMPLE_COUNTER_ID =
PiCounterId.of("FabricIngress.next.simple_counter");
public static final PiCounterId CNT_VLAN_META_COUNTER_ID =
PiCounterId.of("FabricIngress.next.vlan_meta_counter");
public static final PiCounterId CNT_FWD_CLASSIFIER_COUNTER_ID =
PiCounterId.of("FabricIngress.filtering.fwd_classifier_counter");
public static final PiCounterId CNT_BRIDGING_COUNTER_ID =
@ -137,6 +143,8 @@ public final class FabricConstants {
PiCounterId.of("FabricIngress.forwarding.unicast_v6_counter");
public static final PiCounterId CNT_UNICAST_V4_COUNTER_ID =
PiCounterId.of("FabricIngress.forwarding.unicast_v4_counter");
public static final PiCounterId CNT_SIMPLE_COUNTER_ID =
PiCounterId.of("FabricIngress.next.simple_counter");
public static final PiCounterId CNT_INGRESS_PORT_VLAN_COUNTER_ID =
PiCounterId.of("FabricIngress.filtering.ingress_port_vlan_counter");
public static final PiCounterId CNT_MPLS_COUNTER_ID =
@ -147,8 +155,12 @@ public final class FabricConstants {
// Action IDs
public static final PiActionId ACT_FABRICINGRESS_FILTERING_DROP_ID =
PiActionId.of("FabricIngress.filtering.drop");
public static final PiActionId ACT_FABRICINGRESS_NEXT_SET_VLAN_ID =
PiActionId.of("FabricIngress.next.set_vlan");
public static final PiActionId ACT_FABRICINGRESS_FORWARDING_POP_MPLS_AND_NEXT_ID =
PiActionId.of("FabricIngress.forwarding.pop_mpls_and_next");
public static final PiActionId ACT_FABRICEGRESS_EGRESS_NEXT_POP_VLAN_ID =
PiActionId.of("FabricEgress.egress_next.pop_vlan");
public static final PiActionId ACT_FABRICINGRESS_FILTERING_SET_FORWARDING_TYPE_ID =
PiActionId.of("FabricIngress.filtering.set_forwarding_type");
public static final PiActionId ACT_NOP_ID = PiActionId.of("nop");
@ -156,6 +168,10 @@ public final class FabricConstants {
PiActionId.of("FabricIngress.filtering.set_vlan");
public static final PiActionId ACT_FABRICINGRESS_NEXT_MPLS_ROUTING_V6_ID =
PiActionId.of("FabricIngress.next.mpls_routing_v6");
public static final PiActionId ACT_FABRICEGRESS_PKT_IO_EGRESS_POP_VLAN_ID =
PiActionId.of("FabricEgress.pkt_io_egress.pop_vlan");
public static final PiActionId ACT_FABRICINGRESS_NEXT_L3_ROUTING_VLAN_ID =
PiActionId.of("FabricIngress.next.l3_routing_vlan");
public static final PiActionId ACT_NOACTION_ID = PiActionId.of("NoAction");
public static final PiActionId ACT_FABRICINGRESS_NEXT_SET_MCAST_GROUP_ID =
PiActionId.of("FabricIngress.next.set_mcast_group");
@ -203,6 +219,5 @@ public final class FabricConstants {
PiControlMetadataId.of("ingress_port");
public static final PiControlMetadataId CTRL_META_EGRESS_PORT_ID =
PiControlMetadataId.of("egress_port");
public static final int PORT_BITWIDTH = 9;
}

View File

@ -90,7 +90,10 @@ public class FabricInterpreter extends AbstractHandlerBehaviour
FabricConstants.TBL_ACL_ID);
private static final Set<PiTableId> NEXT_CTRL_TBLS = ImmutableSet.of(FabricConstants.TBL_SIMPLE_ID,
FabricConstants.TBL_HASHED_ID,
FabricConstants.TBL_MULTICAST_ID);
FabricConstants.TBL_MULTICAST_ID,
FabricConstants.TBL_VLAN_META_ID);
private static final Set<PiTableId> E_NEXT_CTRL_TBLS = ImmutableSet.of(FabricConstants.TBL_EGRESS_VLAN_ID);
private static final ImmutableMap<Criterion.Type, PiMatchFieldId> CRITERION_MAP =
ImmutableMap.<Criterion.Type, PiMatchFieldId>builder()
@ -161,6 +164,8 @@ public class FabricInterpreter extends AbstractHandlerBehaviour
return FabricTreatmentInterpreter.mapForwardingTreatment(treatment);
} else if (NEXT_CTRL_TBLS.contains(piTableId)) {
return FabricTreatmentInterpreter.mapNextTreatment(treatment);
} else if (E_NEXT_CTRL_TBLS.contains(piTableId)) {
return FabricTreatmentInterpreter.mapEgressNextTreatment(treatment);
} else {
throw new PiInterpreterException(String.format("Table %s unsupported", piTableId));
}

View File

@ -50,6 +50,13 @@ import static org.slf4j.LoggerFactory.getLogger;
final class FabricTreatmentInterpreter {
private static final Logger log = getLogger(FabricTreatmentInterpreter.class);
private static final String INVALID_TREATMENT = "Invalid treatment for %s block: %s";
private static final PiAction NOP = PiAction.builder()
.withId(FabricConstants.ACT_NOP_ID)
.build();
private static final PiAction POP_VLAN = PiAction.builder()
.withId(FabricConstants.ACT_FABRICEGRESS_EGRESS_NEXT_POP_VLAN_ID)
.build();
// Hide default constructor
protected FabricTreatmentInterpreter() {
@ -72,9 +79,7 @@ final class FabricTreatmentInterpreter {
Instruction noActInst = Instructions.createNoAction();
if (instructions.isEmpty() || instructions.contains(noActInst)) {
// nop
return PiAction.builder()
.withId(FabricConstants.ACT_NOP_ID)
.build();
return NOP;
}
L2ModificationInstruction.ModVlanHeaderInstruction pushVlanInst = null;
@ -159,17 +164,16 @@ final class FabricTreatmentInterpreter {
* output
* set_vlan_output
* l3_routing
* l3_routing_vlan
* mpls_routing_v4
*
* Unsupported, using PiAction directly:
* set_next_type
*
* Unsupported, need to find a way to implement it
* mpls_routing_v6
*/
public static PiAction mapNextTreatment(TrafficTreatment treatment)
throws PiInterpreterException {
// TODO: refactor this method
List<Instruction> insts = treatment.allInstructions();
OutputInstruction outInst = null;
ModEtherInstruction modEthDstInst = null;
@ -195,8 +199,6 @@ final class FabricTreatmentInterpreter {
case MPLS_LABEL:
modMplsInst = (ModMplsLabelInstruction) l2Inst;
break;
case VLAN_PUSH:
break;
default:
log.warn("Unsupported l2 instruction sub type: {}", l2Inst.subtype());
break;
@ -212,8 +214,23 @@ final class FabricTreatmentInterpreter {
}
if (outInst == null) {
// for vlan_meta table only
if (modVlanIdInst != null) {
// set_vlan
VlanId vlanId = modVlanIdInst.vlanId();
PiActionParam newVlanParam =
new PiActionParam(FabricConstants.ACT_PRM_NEW_VLAN_ID_ID,
ImmutableByteSequence.copyFrom(vlanId.toShort()));
// set_vlan_output
return PiAction.builder()
.withId(FabricConstants.ACT_FABRICINGRESS_NEXT_SET_VLAN_ID)
.withParameter(newVlanParam)
.build();
} else {
throw new PiInterpreterException(format(INVALID_TREATMENT, "next", treatment));
}
}
short portNum = (short) outInst.port().toLong();
PiActionParam portNumParam = new PiActionParam(FabricConstants.ACT_PRM_PORT_NUM_ID,
ImmutableByteSequence.copyFrom(portNum));
@ -267,6 +284,17 @@ final class FabricTreatmentInterpreter {
}
}
if (modVlanIdInst != null) {
VlanId vlanId = modVlanIdInst.vlanId();
PiActionParam vlanParam =
new PiActionParam(FabricConstants.ACT_PRM_NEW_VLAN_ID_ID,
ImmutableByteSequence.copyFrom(vlanId.toShort()));
// L3 routing and set VLAN
return PiAction.builder()
.withId(FabricConstants.ACT_FABRICINGRESS_NEXT_L3_ROUTING_VLAN_ID)
.withParameters(ImmutableList.of(srcMacParam, dstMacParam, portNumParam, vlanParam))
.build();
} else {
// L3 routing
return PiAction.builder()
.withId(FabricConstants.ACT_FABRICINGRESS_NEXT_L3_ROUTING_ID)
@ -275,7 +303,20 @@ final class FabricTreatmentInterpreter {
dstMacParam))
.build();
}
}
throw new PiInterpreterException(format(INVALID_TREATMENT, "next", treatment));
}
/**
 * Maps a treatment for the egress_next control block to a PiAction.
 * Only the pop_vlan action is supported for now: a treatment containing a
 * VLAN_POP L2 modification maps to POP_VLAN, anything else maps to NOP.
 * New actions may be added to this control block in the future.
 *
 * @param treatment the traffic treatment to translate
 * @return POP_VLAN if the treatment pops the VLAN tag, otherwise NOP
 */
public static PiAction mapEgressNextTreatment(TrafficTreatment treatment) {
    for (Instruction instruction : treatment.allInstructions()) {
        if (instruction.type() != Instruction.Type.L2MODIFICATION) {
            continue;
        }
        L2ModificationInstruction l2Instruction = (L2ModificationInstruction) instruction;
        if (l2Instruction.subtype() == L2ModificationInstruction.L2SubType.VLAN_POP) {
            return POP_VLAN;
        }
    }
    return NOP;
}
}

View File

@ -16,16 +16,21 @@
package org.onosproject.pipelines.fabric.pipeliner;
import org.onlab.packet.VlanId;
import org.onosproject.net.DeviceId;
import org.onosproject.net.PortNumber;
import org.onosproject.net.driver.Driver;
import org.onosproject.net.flow.DefaultFlowRule;
import org.onosproject.net.flow.DefaultTrafficSelector;
import org.onosproject.net.flow.DefaultTrafficTreatment;
import org.onosproject.net.flow.TrafficSelector;
import org.onosproject.net.flow.TrafficTreatment;
import org.onosproject.net.flow.criteria.Criterion;
import org.onosproject.net.flow.criteria.PiCriterion;
import org.onosproject.net.flow.criteria.VlanIdCriterion;
import org.onosproject.net.flow.instructions.Instruction;
import org.onosproject.net.flow.instructions.Instructions.OutputInstruction;
import org.onosproject.net.flow.instructions.L2ModificationInstruction;
import org.onosproject.net.flowobjective.DefaultNextObjective;
import org.onosproject.net.flowobjective.NextObjective;
import org.onosproject.net.flowobjective.Objective;
@ -44,8 +49,11 @@ import java.util.stream.Collectors;
import static org.onosproject.pipelines.fabric.FabricConstants.ACT_PRF_FABRICINGRESS_NEXT_ECMP_SELECTOR_ID;
import static org.onosproject.pipelines.fabric.FabricConstants.HF_FABRIC_METADATA_NEXT_ID_ID;
import static org.onosproject.pipelines.fabric.FabricConstants.HF_STANDARD_METADATA_EGRESS_PORT_ID;
import static org.onosproject.pipelines.fabric.FabricConstants.TBL_EGRESS_VLAN_ID;
import static org.onosproject.pipelines.fabric.FabricConstants.TBL_HASHED_ID;
import static org.onosproject.pipelines.fabric.FabricConstants.TBL_SIMPLE_ID;
import static org.onosproject.pipelines.fabric.FabricConstants.TBL_VLAN_META_ID;
import static org.slf4j.LoggerFactory.getLogger;
/**
@ -66,6 +74,8 @@ public class FabricNextPipeliner {
public PipelinerTranslationResult next(NextObjective nextObjective) {
PipelinerTranslationResult.Builder resultBuilder = PipelinerTranslationResult.builder();
processNextVlanMeta(nextObjective, resultBuilder);
switch (nextObjective.type()) {
case SIMPLE:
processSimpleNext(nextObjective, resultBuilder);
@ -82,6 +92,38 @@ public class FabricNextPipeliner {
return resultBuilder.build();
}
/**
 * Programs the vlan_meta table with the VLAN ID carried in the next
 * objective's metadata, keyed by the next id. Does nothing when the
 * objective has no metadata or the metadata has no VLAN_VID criterion.
 *
 * @param next          the next objective being translated
 * @param resultBuilder accumulator for the generated flow rules
 */
private void processNextVlanMeta(NextObjective next,
                                 PipelinerTranslationResult.Builder resultBuilder) {
    TrafficSelector metadata = next.meta();
    if (metadata == null) {
        // No metadata on the objective; nothing to program.
        return;
    }
    Criterion criterion = metadata.getCriterion(Criterion.Type.VLAN_VID);
    if (criterion == null) {
        // Metadata carries no VLAN id; skip the vlan_meta table.
        return;
    }
    VlanId vid = ((VlanIdCriterion) criterion).vlanId();
    TrafficTreatment setVlanTreatment = DefaultTrafficTreatment.builder()
            .setVlanId(vid)
            .build();
    resultBuilder.addFlowRule(DefaultFlowRule.builder()
            .forDevice(deviceId)
            .fromApp(next.appId())
            .forTable(TBL_VLAN_META_ID)
            .withSelector(buildNextIdSelector(next.id()))
            .withTreatment(setVlanTreatment)
            .withPriority(next.priority())
            .makePermanent()
            .build());
}
private void processSimpleNext(NextObjective next,
PipelinerTranslationResult.Builder resultBuilder) {
@ -93,18 +135,14 @@ public class FabricNextPipeliner {
TrafficSelector selector = buildNextIdSelector(next.id());
TrafficTreatment treatment = next.next().iterator().next();
OutputInstruction outputInst = treatment.allInstructions()
.stream()
.filter(inst -> inst.type() == Instruction.Type.OUTPUT)
.map(inst -> (OutputInstruction) inst)
.findFirst()
.orElse(null);
PortNumber outputPort = getOutputPort(treatment);
if (outputInst == null) {
if (outputPort == null) {
log.warn("At least one output instruction in simple next objective");
resultBuilder.setError(ObjectiveError.BADPARAMS);
return;
}
resultBuilder.addFlowRule(DefaultFlowRule.builder()
.withSelector(selector)
.withTreatment(treatment)
@ -114,29 +152,81 @@ public class FabricNextPipeliner {
.forDevice(deviceId)
.fromApp(next.appId())
.build());
if (includesPopVlanInst(treatment)) {
processVlanPopRule(outputPort, next, resultBuilder);
}
}
private void processHashedNext(NextObjective nextObjective, PipelinerTranslationResult.Builder resultBuilder) {
/**
 * Extracts the port of the first OUTPUT instruction in a treatment.
 *
 * @param treatment the treatment to inspect
 * @return the output port, or null if there is no OUTPUT instruction
 */
private PortNumber getOutputPort(TrafficTreatment treatment) {
    for (Instruction instruction : treatment.allInstructions()) {
        if (instruction.type() == Instruction.Type.OUTPUT) {
            return ((OutputInstruction) instruction).port();
        }
    }
    return null;
}
/**
 * Tells whether a treatment contains a VLAN_POP L2 modification.
 *
 * @param treatment the treatment to inspect
 * @return true if the treatment pops the VLAN tag, false otherwise
 */
private boolean includesPopVlanInst(TrafficTreatment treatment) {
    for (Instruction instruction : treatment.allInstructions()) {
        if (instruction.type() == Instruction.Type.L2MODIFICATION
                && ((L2ModificationInstruction) instruction).subtype()
                        == L2ModificationInstruction.L2SubType.VLAN_POP) {
            return true;
        }
    }
    return false;
}
/**
 * Programs the egress_vlan table to pop the VLAN tag for packets leaving
 * the given port with the VLAN ID found in the next objective's metadata.
 * Fails the objective with BADPARAMS if the metadata does not provide a
 * VLAN_VID criterion, since the egress_vlan table matches on the VLAN id.
 *
 * @param port          the egress port to match
 * @param next          the next objective being translated
 * @param resultBuilder accumulator for the generated flow rules
 */
private void processVlanPopRule(PortNumber port, NextObjective next,
                                PipelinerTranslationResult.Builder resultBuilder) {
    TrafficSelector meta = next.meta();
    // Guard against objectives that request a VLAN pop without telling us
    // which VLAN to match; dereferencing blindly would throw an NPE.
    VlanIdCriterion vlanIdCriterion = meta == null ? null
            : (VlanIdCriterion) meta.getCriterion(Criterion.Type.VLAN_VID);
    if (vlanIdCriterion == null) {
        log.warn("Missing VLAN_VID metadata in next objective {}", next.id());
        resultBuilder.setError(ObjectiveError.BADPARAMS);
        return;
    }
    VlanId vlanId = vlanIdCriterion.vlanId();
    // The egress_vlan table matches on (vlan_id, egress_port); the egress
    // port is a PI-specific field, hence the PiCriterion.
    PiCriterion egressVlanTableMatch = PiCriterion.builder()
            .matchExact(HF_STANDARD_METADATA_EGRESS_PORT_ID,
                        (short) port.toLong())
            .build();
    // Add VLAN pop rule to egress pipeline table
    TrafficSelector selector = DefaultTrafficSelector.builder()
            .matchPi(egressVlanTableMatch)
            .matchVlanId(vlanId)
            .build();
    TrafficTreatment treatment = DefaultTrafficTreatment.builder()
            .popVlan()
            .build();
    resultBuilder.addFlowRule(DefaultFlowRule.builder()
            .withSelector(selector)
            .withTreatment(treatment)
            .forTable(TBL_EGRESS_VLAN_ID)
            .makePermanent()
            .withPriority(next.priority())
            .forDevice(deviceId)
            .fromApp(next.appId())
            .build());
}
private void processHashedNext(NextObjective next, PipelinerTranslationResult.Builder resultBuilder) {
boolean noHashedTable = Boolean.parseBoolean(driver.getProperty(NO_HASHED_TABLE));
if (noHashedTable) {
if (nextObjective.next().isEmpty()) {
if (next.next().isEmpty()) {
return;
}
// use first action if not support hashed group
TrafficTreatment treatment = nextObjective.next().iterator().next();
TrafficTreatment treatment = next.next().iterator().next();
NextObjective.Builder simpleNext = DefaultNextObjective.builder()
.addTreatment(treatment)
.withId(nextObjective.id())
.fromApp(nextObjective.appId())
.withId(next.id())
.fromApp(next.appId())
.makePermanent()
.withMeta(nextObjective.meta())
.withPriority(nextObjective.priority())
.withMeta(next.meta())
.withPriority(next.priority())
.withType(NextObjective.Type.SIMPLE);
if (nextObjective.context().isPresent()) {
processSimpleNext(simpleNext.add(nextObjective.context().get()), resultBuilder);
if (next.context().isPresent()) {
processSimpleNext(simpleNext.add(next.context().get()), resultBuilder);
} else {
processSimpleNext(simpleNext.add(), resultBuilder);
}
@ -144,15 +234,23 @@ public class FabricNextPipeliner {
}
// create hash groups
int groupId = nextObjective.id();
List<GroupBucket> bucketList = nextObjective.next().stream()
int groupId = next.id();
List<GroupBucket> bucketList = next.next().stream()
.map(DefaultGroupBucket::createSelectGroupBucket)
.collect(Collectors.toList());
if (bucketList.size() != nextObjective.next().size()) {
// Egress VLAN handling
next.next().forEach(treatment -> {
PortNumber outputPort = getOutputPort(treatment);
if (includesPopVlanInst(treatment) && outputPort != null) {
processVlanPopRule(outputPort, next, resultBuilder);
}
});
if (bucketList.size() != next.next().size()) {
// some action not converted
// set error
log.warn("Expected bucket size {}, got {}", nextObjective.next().size(), bucketList.size());
log.warn("Expected bucket size {}, got {}", next.next().size(), bucketList.size());
resultBuilder.setError(ObjectiveError.BADPARAMS);
return;
}
@ -167,18 +265,18 @@ public class FabricNextPipeliner {
buckets,
groupKey,
groupId,
nextObjective.appId()));
next.appId()));
// flow
// If the operation is ADD_TO_EXISTING or REMOVE_FROM_EXISTING, we modify
// the group buckets only; the flow rule does not change
if (nextObjective.op() == Objective.Operation.ADD_TO_EXISTING ||
nextObjective.op() == Objective.Operation.REMOVE_FROM_EXISTING) {
if (next.op() == Objective.Operation.ADD_TO_EXISTING ||
next.op() == Objective.Operation.REMOVE_FROM_EXISTING) {
return;
}
TrafficSelector selector = buildNextIdSelector(nextObjective.id());
TrafficSelector selector = buildNextIdSelector(next.id());
TrafficTreatment treatment = DefaultTrafficTreatment.builder()
.piTableAction(PiActionGroupId.of(nextObjective.id()))
.piTableAction(PiActionGroupId.of(next.id()))
.build();
resultBuilder.addFlowRule(DefaultFlowRule.builder()
@ -186,9 +284,9 @@ public class FabricNextPipeliner {
.withTreatment(treatment)
.forTable(TBL_HASHED_ID)
.makePermanent()
.withPriority(nextObjective.priority())
.withPriority(next.priority())
.forDevice(deviceId)
.fromApp(nextObjective.appId())
.fromApp(next.appId())
.build());
}

View File

@ -263,7 +263,12 @@ public class FabricPipeliner extends AbstractHandlerBehaviour implements Pipeli
};
FlowRuleOperations ops = buildFlowRuleOps(objective, flowRules, ctx);
if (ops != null) {
flowRuleService.apply(ops);
} else {
// remove pendings
flowRules.forEach(flowRule -> pendingInstallObjectiveFlows.remove(flowRule.id()));
}
}
private void installGroups(Objective objective, Collection<GroupDescription> groups) {
@ -317,8 +322,13 @@ public class FabricPipeliner extends AbstractHandlerBehaviour implements Pipeli
case REMOVE:
flowRules.forEach(ops::remove);
break;
case ADD_TO_EXISTING:
case REMOVE_FROM_EXISTING:
// Next objectives may use the ADD_TO_EXISTING or REMOVE_FROM_EXISTING op.
// No need to update FlowRules for the vlan_meta table.
return null;
default:
log.warn("Unsupported op {} for {}", objective);
log.warn("Unsupported op {} for {}", objective.op(), objective);
fail(objective, ObjectiveError.BADPARAMS);
return null;
}

View File

@ -39,7 +39,6 @@ inout standard_metadata_t standard_metadata) {
Forwarding() forwarding;
Next() next;
PortCountersControl() port_counters_control;
EgressNextControl() egress_next;
apply {
packet_io_ingress.apply(hdr, fabric_metadata, standard_metadata);
@ -55,7 +54,7 @@ inout standard_metadata_t standard_metadata) {
forwarding.apply(hdr, fabric_metadata, standard_metadata);
next.apply(hdr, fabric_metadata, standard_metadata);
port_counters_control.apply(hdr, fabric_metadata, standard_metadata);
egress_next.apply(hdr, fabric_metadata, standard_metadata);
}
}
@ -63,7 +62,10 @@ control FabricEgress (inout parsed_headers_t hdr,
inout fabric_metadata_t fabric_metadata,
inout standard_metadata_t standard_metadata) {
PacketIoEgress() pkt_io_egress;
EgressNextControl() egress_next;
apply {
egress_next.apply(hdr, fabric_metadata, standard_metadata);
pkt_io_egress.apply(hdr, fabric_metadata, standard_metadata);
#ifdef WITH_SPGW
spgw_egress.apply(hdr.ipv4, hdr.gtpu_ipv4, hdr.gtpu_udp, hdr.gtpu,

View File

@ -45,8 +45,8 @@ control Filtering (
hdr.ethernet.ether_type = ETHERTYPE_VLAN;
set_vlan(new_vlan_id);
// pop internal vlan before output
fabric_metadata.pop_vlan_at_egress = true;
// pop internal vlan before packet in
fabric_metadata.pop_vlan_when_packet_in = true;
}
action set_forwarding_type(fwd_type_t fwd_type) {

View File

@ -25,6 +25,7 @@ control Next (
inout fabric_metadata_t fabric_metadata,
inout standard_metadata_t standard_metadata) {
action_selector(HashAlgorithm.crc16, 32w64, 32w16) ecmp_selector;
direct_counter(CounterType.packets_and_bytes) vlan_meta_counter;
direct_counter(CounterType.packets_and_bytes) simple_counter;
direct_counter(CounterType.packets_and_bytes) hashed_counter;
@ -32,11 +33,17 @@ control Next (
standard_metadata.egress_spec = port_num;
}
action set_vlan(vlan_id_t new_vlan_id) {
hdr.vlan_tag.vlan_id = new_vlan_id;
}
action pop_vlan() {
hdr.ethernet.ether_type = hdr.vlan_tag.ether_type;
hdr.vlan_tag.setInvalid();
}
action set_vlan_output(vlan_id_t new_vlan_id, port_num_t port_num){
hdr.vlan_tag.vlan_id = new_vlan_id;
// don't remove the vlan from egress since we set the vlan to it.
fabric_metadata.pop_vlan_at_egress = false;
output(port_num);
}
@ -54,6 +61,12 @@ control Next (
output(port_num);
}
action l3_routing_vlan(port_num_t port_num, mac_addr_t smac, mac_addr_t dmac, vlan_id_t new_vlan_id) {
rewrite_smac(smac);
rewrite_dmac(dmac);
set_vlan_output(new_vlan_id, port_num);
}
action push_mpls (mpls_label_t label, bit<3> tc) {
// Assume that at most one MPLS label is pushed.
hdr.mpls.setValid();
@ -80,6 +93,19 @@ control Next (
push_mpls(label, 3w0);
}
table vlan_meta {
key = {
fabric_metadata.next_id: exact;
}
actions = {
set_vlan;
nop;
}
default_action = nop;
counters = vlan_meta_counter;
}
table simple {
key = {
fabric_metadata.next_id: exact;
@ -90,6 +116,7 @@ control Next (
set_vlan_output;
l3_routing;
mpls_routing_v4;
l3_routing_vlan;
}
counters = simple_counter;
}
@ -137,6 +164,7 @@ control Next (
#endif // WITH_MULTICAST
apply {
vlan_meta.apply();
if (simple.apply().hit) {
if (!hdr.mpls.isValid()) {
if(hdr.ipv4.isValid()) {
@ -159,13 +187,26 @@ control Next (
control EgressNextControl (
inout parsed_headers_t hdr,
inout fabric_metadata_t fabric_metadata,
inout standard_metadata_t standard_metadata){
inout standard_metadata_t standard_metadata) {
apply {
// pop internal vlan if the meta is set
if (fabric_metadata.pop_vlan_at_egress) {
action pop_vlan() {
hdr.ethernet.ether_type = hdr.vlan_tag.ether_type;
hdr.vlan_tag.setInvalid();
}
table egress_vlan {
key = {
hdr.vlan_tag.vlan_id: exact;
standard_metadata.egress_port: exact;
}
actions = {
pop_vlan;
nop;
}
default_action = nop;
}
apply {
egress_vlan.apply();
}
}

View File

@ -34,8 +34,15 @@ control PacketIoEgress(
inout parsed_headers_t hdr,
inout fabric_metadata_t fabric_metadata,
inout standard_metadata_t standard_metadata) {
action pop_vlan() {
hdr.ethernet.ether_type = hdr.vlan_tag.ether_type;
hdr.vlan_tag.setInvalid();
}
apply {
if (standard_metadata.egress_port == CPU_PORT) {
if (hdr.vlan_tag.isValid() && fabric_metadata.pop_vlan_when_packet_in) {
pop_vlan();
}
hdr.packet_in.setValid();
hdr.packet_in.ingress_port = standard_metadata.ingress_port;
}

View File

@ -151,7 +151,7 @@ struct spgw_meta_t {
struct fabric_metadata_t {
fwd_type_t fwd_type;
next_id_t next_id;
bool pop_vlan_at_egress;
bool pop_vlan_when_packet_in;
bit<8> ip_proto;
bit<16> l4_src_port;
bit<16> l4_dst_port;

View File

@ -54,6 +54,7 @@ inout standard_metadata_t standard_metadata) {
state parse_vlan_tag {
packet.extract(hdr.vlan_tag);
fabric_metadata.original_ether_type = hdr.vlan_tag.ether_type;
transition select(hdr.vlan_tag.ether_type){
ETHERTYPE_ARP: parse_arp;
ETHERTYPE_IPV4: parse_ipv4;

View File

@ -390,6 +390,27 @@ tables {
direct_resource_ids: 301995056
size: 256
}
tables {
preamble {
id: 33562709
name: "FabricIngress.next.vlan_meta"
alias: "vlan_meta"
}
match_fields {
id: 1
name: "fabric_metadata.next_id"
bitwidth: 32
match_type: EXACT
}
action_refs {
id: 16790685
}
action_refs {
id: 16819938
}
direct_resource_ids: 302008112
size: 1024
}
tables {
preamble {
id: 33571723
@ -414,6 +435,9 @@ tables {
action_refs {
id: 16780811
}
action_refs {
id: 16791579
}
action_refs {
id: 16800567
annotations: "@defaultonly()"
@ -472,6 +496,32 @@ tables {
direct_resource_ids: 302024536
size: 1024
}
tables {
preamble {
id: 33599342
name: "FabricEgress.egress_next.egress_vlan"
alias: "egress_vlan"
}
match_fields {
id: 1
name: "hdr.vlan_tag.vlan_id"
bitwidth: 12
match_type: EXACT
}
match_fields {
id: 2
name: "standard_metadata.egress_port"
bitwidth: 9
match_type: EXACT
}
action_refs {
id: 16790030
}
action_refs {
id: 16819938
}
size: 1024
}
actions {
preamble {
id: 16800567
@ -540,7 +590,7 @@ actions {
preamble {
id: 16793253
name: "FabricIngress.filtering.set_vlan"
alias: "set_vlan"
alias: "filtering.set_vlan"
}
params {
id: 1
@ -622,6 +672,18 @@ actions {
bitwidth: 9
}
}
actions {
preamble {
id: 16790685
name: "FabricIngress.next.set_vlan"
alias: "next.set_vlan"
}
params {
id: 1
name: "new_vlan_id"
bitwidth: 12
}
}
actions {
preamble {
id: 16808391
@ -661,6 +723,33 @@ actions {
bitwidth: 48
}
}
actions {
preamble {
id: 16791579
name: "FabricIngress.next.l3_routing_vlan"
alias: "l3_routing_vlan"
}
params {
id: 1
name: "port_num"
bitwidth: 9
}
params {
id: 2
name: "smac"
bitwidth: 48
}
params {
id: 3
name: "dmac"
bitwidth: 48
}
params {
id: 4
name: "new_vlan_id"
bitwidth: 12
}
}
actions {
preamble {
id: 16780811
@ -739,6 +828,20 @@ actions {
alias: "gtpu_encap"
}
}
actions {
preamble {
id: 16801047
name: "FabricEgress.pkt_io_egress.pop_vlan"
alias: "pkt_io_egress.pop_vlan"
}
}
actions {
preamble {
id: 16790030
name: "FabricEgress.egress_next.pop_vlan"
alias: "egress_next.pop_vlan"
}
}
action_profiles {
preamble {
id: 285233747
@ -881,6 +984,17 @@ direct_counters {
}
direct_table_id: 33574876
}
direct_counters {
preamble {
id: 302008112
name: "FabricIngress.next.vlan_meta_counter"
alias: "vlan_meta_counter"
}
spec {
unit: BOTH
}
direct_table_id: 33562709
}
direct_counters {
preamble {
id: 301991880

File diff suppressed because it is too large Load Diff

View File

@ -313,6 +313,27 @@ tables {
direct_resource_ids: 301995056
size: 256
}
tables {
preamble {
id: 33562709
name: "FabricIngress.next.vlan_meta"
alias: "vlan_meta"
}
match_fields {
id: 1
name: "fabric_metadata.next_id"
bitwidth: 32
match_type: EXACT
}
action_refs {
id: 16790685
}
action_refs {
id: 16819938
}
direct_resource_ids: 302008112
size: 1024
}
tables {
preamble {
id: 33571723
@ -337,6 +358,9 @@ tables {
action_refs {
id: 16780811
}
action_refs {
id: 16791579
}
action_refs {
id: 16800567
annotations: "@defaultonly()"
@ -395,6 +419,32 @@ tables {
direct_resource_ids: 302024536
size: 1024
}
tables {
preamble {
id: 33599342
name: "FabricEgress.egress_next.egress_vlan"
alias: "egress_vlan"
}
match_fields {
id: 1
name: "hdr.vlan_tag.vlan_id"
bitwidth: 12
match_type: EXACT
}
match_fields {
id: 2
name: "standard_metadata.egress_port"
bitwidth: 9
match_type: EXACT
}
action_refs {
id: 16790030
}
action_refs {
id: 16819938
}
size: 1024
}
actions {
preamble {
id: 16819938
@ -420,7 +470,7 @@ actions {
preamble {
id: 16793253
name: "FabricIngress.filtering.set_vlan"
alias: "set_vlan"
alias: "filtering.set_vlan"
}
params {
id: 1
@ -502,6 +552,18 @@ actions {
bitwidth: 9
}
}
actions {
preamble {
id: 16790685
name: "FabricIngress.next.set_vlan"
alias: "next.set_vlan"
}
params {
id: 1
name: "new_vlan_id"
bitwidth: 12
}
}
actions {
preamble {
id: 16808391
@ -541,6 +603,33 @@ actions {
bitwidth: 48
}
}
actions {
preamble {
id: 16791579
name: "FabricIngress.next.l3_routing_vlan"
alias: "l3_routing_vlan"
}
params {
id: 1
name: "port_num"
bitwidth: 9
}
params {
id: 2
name: "smac"
bitwidth: 48
}
params {
id: 3
name: "dmac"
bitwidth: 48
}
params {
id: 4
name: "new_vlan_id"
bitwidth: 12
}
}
actions {
preamble {
id: 16780811
@ -612,6 +701,20 @@ actions {
bitwidth: 48
}
}
actions {
preamble {
id: 16801047
name: "FabricEgress.pkt_io_egress.pop_vlan"
alias: "pkt_io_egress.pop_vlan"
}
}
actions {
preamble {
id: 16790030
name: "FabricEgress.egress_next.pop_vlan"
alias: "egress_next.pop_vlan"
}
}
action_profiles {
preamble {
id: 285233747
@ -743,6 +846,17 @@ direct_counters {
}
direct_table_id: 33574876
}
direct_counters {
preamble {
id: 302008112
name: "FabricIngress.next.vlan_meta_counter"
alias: "vlan_meta_counter"
}
spec {
unit: BOTH
}
direct_table_id: 33562709
}
direct_counters {
preamble {
id: 301991880

View File

@ -44,11 +44,36 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.onosproject.pipelines.fabric.FabricConstants.ACT_PRF_FABRICINGRESS_NEXT_ECMP_SELECTOR_ID;
import static org.onosproject.pipelines.fabric.FabricConstants.TBL_HASHED_ID;
import static org.onosproject.pipelines.fabric.FabricConstants.TBL_VLAN_META_ID;
/**
* Test cases for fabric.p4 pipeline next control block.
*/
public class FabricNextPipelinerTest extends FabricPipelinerTest {
private FlowRule vlanMetaFlowRule;
/**
 * Builds the flow rule expected in the vlan_meta table for NEXT_ID_1 with
 * VLAN 100; shared fixture for the test cases below.
 */
public FabricNextPipelinerTest() {
    PiCriterion nextIdMatch = PiCriterion.builder()
            .matchExact(FabricConstants.HF_FABRIC_METADATA_NEXT_ID_ID, NEXT_ID_1)
            .build();
    TrafficSelector nextIdSelector = DefaultTrafficSelector.builder()
            .matchPi(nextIdMatch)
            .build();
    TrafficTreatment setVlanTreatment = DefaultTrafficTreatment.builder()
            .setVlanId(VLAN_100)
            .build();
    vlanMetaFlowRule = DefaultFlowRule.builder()
            .forDevice(DEVICE_ID)
            .fromApp(APP_ID)
            .forTable(TBL_VLAN_META_ID)
            .withSelector(nextIdSelector)
            .withTreatment(setVlanTreatment)
            // FIXME: currently next objective doesn't support priority, ignore this
            .withPriority(0)
            .makePermanent()
            .build();
}
/**
* Test program output rule for Simple table.
@ -86,10 +111,25 @@ public class FabricNextPipelinerTest extends FabricPipelinerTest {
testSimple(treatment);
}
/**
* Test program set mac, set vlan, and output rule for Simple table.
*/
@Test
public void testSimpleOutputWithVlanAndMacTranslation() {
    // Routed output: rewrite both MAC addresses, set VLAN 100, then output.
    // Instruction order is preserved on purpose: the expected flow rule is
    // built from this same treatment and compared with exactMatch.
    testSimple(DefaultTrafficTreatment.builder()
                       .setEthSrc(ROUTER_MAC)
                       .setEthDst(HOST_MAC)
                       .setVlanId(VLAN_100)
                       .setOutput(PORT_1)
                       .build());
}
private void testSimple(TrafficTreatment treatment) {
NextObjective nextObjective = DefaultNextObjective.builder()
.withId(NEXT_ID_1)
.withPriority(PRIORITY)
.withMeta(VLAN_META)
.addTreatment(treatment)
.withType(NextObjective.Type.SIMPLE)
.makePermanent()
@ -100,7 +140,7 @@ public class FabricNextPipelinerTest extends FabricPipelinerTest {
List<FlowRule> flowRulesInstalled = (List<FlowRule>) result.flowRules();
List<GroupDescription> groupsInstalled = (List<GroupDescription>) result.groups();
assertEquals(1, flowRulesInstalled.size());
assertEquals(2, flowRulesInstalled.size());
assertTrue(groupsInstalled.isEmpty());
// Simple table
@ -110,7 +150,12 @@ public class FabricNextPipelinerTest extends FabricPipelinerTest {
TrafficSelector nextIdSelector = DefaultTrafficSelector.builder()
.matchPi(nextIdCriterion)
.build();
// VLAN meta table
FlowRule actualFlowRule = flowRulesInstalled.get(0);
assertTrue(actualFlowRule.exactMatch(vlanMetaFlowRule));
actualFlowRule = flowRulesInstalled.get(1);
FlowRule expectedFlowRule = DefaultFlowRule.builder()
.forDevice(DEVICE_ID)
.fromApp(APP_ID)
@ -143,6 +188,7 @@ public class FabricNextPipelinerTest extends FabricPipelinerTest {
NextObjective nextObjective = DefaultNextObjective.builder()
.withId(NEXT_ID_1)
.withPriority(PRIORITY)
.withMeta(VLAN_META)
.addTreatment(treatment1)
.addTreatment(treatment2)
.withType(NextObjective.Type.HASHED)
@ -155,7 +201,7 @@ public class FabricNextPipelinerTest extends FabricPipelinerTest {
// Should generates 2 flows and 1 group
List<FlowRule> flowRulesInstalled = (List<FlowRule>) result.flowRules();
List<GroupDescription> groupsInstalled = (List<GroupDescription>) result.groups();
assertEquals(1, flowRulesInstalled.size());
assertEquals(2, flowRulesInstalled.size());
assertEquals(1, groupsInstalled.size());
// Hashed table
@ -169,7 +215,12 @@ public class FabricNextPipelinerTest extends FabricPipelinerTest {
TrafficTreatment treatment = DefaultTrafficTreatment.builder()
.piTableAction(actionGroupId)
.build();
// VLAN meta table
FlowRule actualFlowRule = flowRulesInstalled.get(0);
assertTrue(actualFlowRule.exactMatch(vlanMetaFlowRule));
actualFlowRule = flowRulesInstalled.get(1);
FlowRule expectedFlowRule = DefaultFlowRule.builder()
.forDevice(DEVICE_ID)
.fromApp(APP_ID)

View File

@ -30,6 +30,8 @@ import org.onosproject.net.PortNumber;
import org.onosproject.net.behaviour.PipelinerContext;
import org.onosproject.net.driver.Driver;
import org.onosproject.net.driver.DriverHandler;
import org.onosproject.net.flow.DefaultTrafficSelector;
import org.onosproject.net.flow.TrafficSelector;
import org.onosproject.net.flow.criteria.PiCriterion;
import org.onosproject.net.group.GroupService;
import org.onosproject.pipelines.fabric.FabricConstants;
@ -54,6 +56,9 @@ public abstract class FabricPipelinerTest {
static final IpPrefix IPV6_MCAST_ADDR = IpPrefix.valueOf("ff00::1/32");
static final MplsLabel MPLS_10 = MplsLabel.mplsLabel(10);
static final Integer NEXT_ID_1 = 1;
static final TrafficSelector VLAN_META = DefaultTrafficSelector.builder()
.matchVlanId(VLAN_100)
.build();
// Forwarding types
static final byte FWD_BRIDGING = 0;