<?xml version="1.0" encoding="US-ASCII"?>
<!-- This template is for creating an Internet Draft using xml2rfc, which is available here: http://xml.resource.org. -->
<!DOCTYPE rfc SYSTEM "rfc2629.dtd" [
<!-- One method to get references from the online citation libraries.
There has to be one entity for each item to be referenced.
An alternate method (rfc include) is described in the references. -->
<!ENTITY RFC2119 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2119.xml">
<!ENTITY RFC5304 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5304.xml">
<!ENTITY RFC5310 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5310.xml">
<!ENTITY RFC4271 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4271.xml">
<!ENTITY RFC4655 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4655.xml">
<!ENTITY RFC5301 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5301.xml">
<!ENTITY RFC5306 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5306.xml">
<!ENTITY RFC5308 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5308.xml">
<!ENTITY RFC5309 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5309.xml">
<!ENTITY RFC5120 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5120.xml">
<!ENTITY RFC6822 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.6822.xml">
<!ENTITY RFC7602 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.7602.xml">
<!ENTITY RFC7938 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.7938.xml">
<!ENTITY RFC7855 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.7855.xml">
<!ENTITY RFC2328 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2328.xml">
<!ENTITY RFC1142 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.1142.xml">
<!ENTITY RFC5303 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5303.xml">
<!ENTITY RFC6234 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.6234.xml">

<!ENTITY RFC2365 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.2365.xml">
<!ENTITY RFC4291 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.4291.xml">
<!ENTITY RFC5881 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5881.xml">
<!ENTITY RFC5709 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.5709.xml">
<!ENTITY RFC7987 SYSTEM "http://xml.resource.org/public/rfc/bibxml/reference.RFC.7987.xml">
]>

<?xml-stylesheet type='text/xsl' href='rfc2629.xslt' ?>
<!-- used by XSLT processors -->
<!-- For a complete list and description of processing instructions (PIs),
 please see http://xml.resource.org/authoring/README.html. -->
<!-- Below are generally applicable Processing Instructions (PIs) that most I-Ds might want to use.
 (Here they are set differently than their defaults in xml2rfc v1.32) -->
<?rfc strict="yes" ?>
<!-- give errors regarding ID-nits and DTD validation -->
<!-- control the table of contents (ToC) -->
<?rfc toc="yes"?>
<!-- generate a ToC -->
<?rfc tocdepth="4"?>
<!-- the number of levels of subsections in ToC. default: 3 -->
<!-- control references -->
<?rfc symrefs="yes"?>
<!-- use symbolic reference tags, i.e., [RFC2119] instead of [1] -->
<?rfc sortrefs="yes" ?>
<!-- sort the reference entries alphabetically -->
<!-- control vertical white space
 (using these PIs as follows is recommended by the RFC Editor) -->
<?rfc compact="yes" ?>
<!-- do not start each main section on a new page -->
<?rfc subcompact="no" ?>
<!-- keep one blank line between list items -->
<!-- end of list of popular I-D processing instructions -->
<rfc category="std" docName="draft-przygienda-rift-05" ipr="trust200902">
    <!-- category values: std, bcp, info, exp, and historic
     ipr values: full3667, noModification3667, noDerivatives3667
     you can add the attributes updates="NNNN" and obsoletes="NNNN"
     they will automatically be output with "(if approved)" -->

    <front>
        <!-- The abbreviated title is used in the page header - it is 
         only necessary if the
         full title is longer than 39 characters -->

        <title abbrev="RIFT">RIFT: Routing in Fat Trees</title>

        <!-- add 'role="editor"' below for the editors if appropriate -->

        <!-- Another author who claims to be an editor -->


        <author fullname="Tony Przygienda" initials="T"
            surname="Przygienda" role="editor">

            <organization>Juniper Networks</organization>

            <address>
                <postal>
                    <street>1194 N. Mathilda Ave</street>

                    <city>Sunnyvale</city>

                    <region>CA</region>

                    <code>94089</code>

                    <country>US</country>
                </postal>

                <email>prz@juniper.net</email>

            </address>
        </author>

<author fullname="Alankar Sharma" initials="A"
    surname="Sharma">

    <organization>Comcast</organization>

    <address>
        <postal>
            <street>1800 Bishops Gate Blvd</street>

<city>Mount Laurel</city>

<region>NJ</region>

<code>08054</code>

<country>US</country>
        </postal>

        <email>Alankar_Sharma@comcast.com</email>

    </address>
</author>

<author fullname="Alia Atlas" initials="A"
    surname="Atlas">

    <organization>Juniper Networks</organization>

    <address>
        <postal>
            <street>10 Technology Park Drive</street>

            <city>Westford</city>

            <region>MA</region>

            <code>01886</code>

            <country>US</country>
        </postal>

        <email>akatlas@juniper.net</email>

    </address>
    </author>

<author fullname="John Drake" initials="J"
    surname="Drake">

    <organization>Juniper Networks</organization>

    <address>
        <postal>
            <street>1194 N. Mathilda Ave</street>

            <city>Sunnyvale</city>

            <region>CA</region>

            <code>94089</code>

            <country>US</country>
        </postal>

        <email>jdrake@juniper.net</email>
        
    </address>
</author>


        <date year="2018" month="Mar" day="01"/>

        <!-- If the month and year are both specified and are the current ones, xml2rfc will fill
         in the current day for you. If only the current year is specified, xml2rfc will fill
         in the current day and month for you. If the year is not the current one, it is
         necessary to specify at least a month (xml2rfc assumes day="1" if not specified for the
         purpose of calculating the expiry date).  With drafts it is normally sufficient to
         specify just the year. -->

        <!-- Meta-data Declarations -->

        <area>Routing</area>

        <workgroup>RIFT Working Group</workgroup>

        <!-- WG name at the upperleft corner of the doc,
         IETF is fine for individual submissions.
         If this element is not present, the default is "Network Working Group",
         which is used by the RFC Editor as a nod to the history of the IETF. -->

        <!-- Keywords will be incorporated into HTML output
         files in a meta tag but they have no effect on text or nroff
         output. If you submit your draft to the RFC Editor, the
         keywords will be used for the search engine. -->

        <abstract>
            <t>This document outlines a
                specialized, dynamic routing protocol for
                Clos and fat-tree network topologies. The protocol
                (1) deals with automatic construction of fat-tree topologies based
                on detection of links, (2) minimizes the amount of routing
                state held at each level, (3) automatically prunes the topology
                distribution exchanges to a sufficient subset of links,
                (4) supports
                automatic disaggregation of prefixes on link and node failures to
                prevent black-holing and suboptimal routing,
                (5) allows traffic steering and
                re-routing policies, (6) allows non-ECMP forwarding,
                (7) automatically re-balances traffic towards the spines based on
                 bandwidth available and ultimately (8) provides
                mechanisms to synchronize a limited key-value data-store that
                can be used after protocol convergence to e.g.
                bootstrap higher levels of functionality on nodes.
            </t>
        </abstract>
    </front>

    <middle>
        <section title="Introduction">
            <t><xref
                target="CLOS">Clos</xref> and <xref
                    target="FATTREE">Fat-Tree</xref>
                have gained prominence in today's networking, primarily as a
                result of
                the paradigm shift towards a centralized data-center based
                architecture that is poised to deliver a majority of
                computation and storage services
                in the  future.
Today's routing protocols were originally geared towards
networks with an irregular topology and a low degree of connectivity,
but given that
they were the only available mechanisms,
several
attempts to apply them to Clos topologies have been made.
                Most successfully
                <xref
                    target="RFC4271">BGP</xref> <xref
                        target="RFC7938"></xref>
                    has been extended to this purpose, not as much due to its
                    inherent suitability to solve the problem but rather because of the
                    perceived capability to modify it "quicker" and the
                    immanent difficulties
                    with <xref
                        target="DIJKSTRA">link-state</xref> based protocols
                    to perform in large scale densely meshed topologies.
            </t>
            <t>
                In looking at the problem through the lens of its requirements
                an optimal approach does not seem however to be a simple
                modification of either a link-state (distributed computation)
                or distance-vector (diffused computation) approach
                but rather a mixture of both, colloquially best described as
                "link-state towards the spine" and "distance vector towards
                the leafs". In other words, "bottom" levels are flooding their
                link-state information in the "northern" direction while
                each switch generates under normal conditions a default
            route and floods it in the "southern" direction. Obviously, such
            aggregation can blackhole in cases of misconfiguration
            or failures
            and this has to be addressed somehow.</t>

<t>For the visually oriented reader, <xref target="first-simple"/> presents a
    first simplified view of the
    resulting information and routes on a RIFT fabric. The top of the fabric
    is holding in its link-state database
    the nodes below it and routes to them. In the second row of the database
    we indicate that
    a partial information of other nodes in the same level is available as well; the
    details of how this is achieved should be postponed for the moment.
    Whereas when we look at the "bottom" of the fabric we see that the topology of
    the leafs is basically empty and they only hold a load balanced default
    route to the next level.
    </t>

                <t>The balance of this document describes
                the resulting protocol and fills in the missing details.

            </t>

                <t>
                    <figure align="center" anchor="first-simple"
                        title="RIFT information distribution">
                        <artwork align="center"><![CDATA[
.                                  [A,B,C,D]
.                                  [E]
.             +-----+      +-----+
.             |  E  |      |  F  | A/32 @ A
.             +-+-+-+      +-+-+-+ B/32 @ B
.               | |          | |   C/32 @ C
.               | |    +-----+ |   D/32 @ D
.               | |    |       |
.               | +------+     |
.               |      | |     |
.       [A,B] +-+---+  | | +---+-+ [A,B]
.       [D]   |  C  +--+ +-+  D  | [C]
.             +-+-+-+      +-+-+-+
.  0/0  @ [E,F] | |          | |   0/0  @ [E,F]
.  A/32 @ A     | |    +-----+ |   A/32 @ A
.  B/32 @ B     | |    |       |   B/32 @ B
.               | +------+     |
.               |      | |     |
.             +-+---+  | | +---+-+
.             |  A  +--+ +-+  B  |
. 0/0 @ [C,D] +-----+      +-----+ 0/0 @ [C,D]
                        ]]>
                        </artwork>
                    </figure>
                    
                </t>


            <section title="Requirements Language">
                <t>The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
                    "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
                    document are to be interpreted as described in <xref
                        target="RFC2119">RFC 2119</xref>.</t>
            </section>

        </section>

        <section title="Reference Frame">


<section title="Terminology" toc="default" anchor="glossary">


    <t>
        This section presents the terminology used in this document.
        It is assumed that the reader is thoroughly familiar with the
        terms and concepts used in <xref target="RFC2328">OSPF</xref>
        and <xref target="RFC1142">IS-IS</xref>, <xref target="ISO10589"/>
        as well as the according
        graph theoretical concepts of shortest path first <xref
            target="DIJKSTRA">(SPF)</xref> computation and directed
        acyclic graphs (DAG).
    </t>

    <t>

        <list style='hanging'>

            <t hangText="Level:"> Clos and Fat Tree networks are
                trees and 'level' denotes the set of nodes at the
                same height
                in such a network, where the bottom level is level
                0.

                A node has links to nodes one level down and/or one level up.
                Under some circumstances, a node may have links to nodes at
                the same level.

            As a footnote: Clos terminology
                often uses the concept of "stage" but due to the
                folded nature of the Fat Tree we do not use it
                to prevent misunderstandings.</t>

            <t hangText="Spine/Aggregation/Edge Levels:">
                Traditional names for Level 2, 1 and 0
                respectively. Level 0 is often called leaf
                as well.</t>

            <t hangText="Point of Delivery (PoD):">A self-contained
                vertical slice of a Clos or Fat Tree network
                containing normally only level 0
                and level 1 nodes.  It communicates with
                nodes in other PoDs via the spine. We number PoDs to
                distinguish them and use PoD #0 to denote "undefined" PoD.
            </t>


            <t hangText="Spine:">
                The set of nodes that provide inter-PoD communication.
                These nodes are also organized into levels (typically
                one, three, or five levels). Spine nodes do not belong
                to any PoD and are assigned the PoD value 0 to indicate
                this.
            </t>

            <t hangText="Leaf:">A node without southbound adjacencies. Its level
                is 0 (except cases where it is deriving its level via ZTP and
                is running without
                LEAF_ONLY which will be explained in <xref target="ZTP"/>).
            </t>


            <t hangText="Connected Spine:"> In case a spine level
                represents
                a connected graph (discounting links terminating at
                different levels), we call it a "connected spine",
                in case
                a spine level consists of multiple partitions,
                we call
                it a "disconnected" or "partitioned spine".
                In other terms, a spine without east-west links is
                disconnected and is the typical configuration for
                Clos and Fat
                Tree networks.
            </t>

            <t hangText="South/Southbound and North/Northbound (Direction):">
                When describing protocol
                elements and procedures,
                we will be
                using in different situations the directionality
                of the compass. I.e., 'south' or 'southbound' mean
                moving
                towards the bottom of the Clos or Fat Tree network
                and 'north' and 'northbound' mean moving towards
                the top of the Clos or Fat Tree network.

            </t>

            <t hangText="Northbound Link:">
                A link to a node one level up or in other words, one
                level further north.
</t>

<t hangText="Southbound Link:">
    A link to a node one level down or in other words, one
    level further south.
    </t>

            <t hangText="East-West Link:">A link between
                two nodes at the same level. East-west
                links are normally not part of Clos or
                "fat-tree" topologies.
            </t>

            <t hangText="Leaf shortcuts (L2L):"> East-west links at
                leaf level
                will need to be differentiated from East-west links at
                other levels.
            </t>

            <t hangText="Southbound representation:">Information sent
                towards a lower level
                representing only a limited amount of information.
            </t>

            <t hangText="TIE:">This is an acronym for a "Topology
                Information Element". TIEs are exchanged between RIFT nodes to
                describe parts of a network such as links and address prefixes.
                It can be thought of as
                largely equivalent to ISIS LSPs or OSPF LSAs. We will talk about
                N-TIEs when talking about TIEs in the northbound representation
                and S-TIEs for the southbound equivalent.
            </t>

            <t hangText="Node TIE:">This is an acronym for a
                "Node Topology Information Element",
                largely equivalent to an OSPF Router LSA, i.e. it contains all neighbors
                the node discovered and
                information about node itself.
            </t>

            <t hangText="Prefix TIE:">This is an acronym for a "Prefix Topology
                Information Element" and it contains all prefixes
                directly attached to
                this node in case of a N-TIE and in case of S-TIE the necessary
                default and de-aggregated prefixes the node passes
                southbound.
            </t>

<t hangText="Policy-Guided Information:">Information that
    is passed in either
    southbound direction or north-bound direction
    by the means of diffusion and can be filtered via
    policies. Policy-Guided Prefixes and KV TIEs are examples
    of Policy-Guided Information.</t>

            <t hangText="Key Value TIE:">A S-TIE that is carrying a set of
                key value pairs <xref target="DYNAMO"/>.
                It can be used to distribute information in the southbound
                direction within
                the protocol.
            </t>

            <t hangText="TIDE:">Topology Information Description Element,
                equivalent to CSNP in ISIS.</t> <t hangText="TIRE:">Topology
                    Information Request Element, equivalent to PSNP in ISIS. It
                    can both
                    confirm received and request missing TIEs.</t>

                <t hangText="PGP:">Policy-Guided Prefixes allow supporting
                    traffic engineering that cannot be achieved by the means
                    of SPF computation or normal node and prefix S-TIE
                    origination. S-PGPs are propagated in south direction
                only and N-PGPs follow northern direction strictly.</t>

                <t hangText="De-aggregation/Disaggregation:">Process in
                    which a node
                    decides to
                    advertise certain prefixes it received in N-TIEs to
                    prevent black-holing and suboptimal routing upon
                    link failures.</t>

                <t hangText="LIE:">This is an acronym for a
                    "Link Information Element",
                    largely equivalent to HELLOs in IGPs and exchanged over
                    all the links between systems running RIFT to form adjacencies.
                </t>
                
                <t hangText="FL:">Flooding Leader for a specific system has a
                    dedicated role to flood
                    TIEs of that system.
                </t>

                <t hangText="BAD:">This is an acronym for Bandwidth
                    Adjusted Distance. RIFT
                    calculates the amount of northbound bandwidth
                    available for a node
                    compared to other nodes at the same level and
                    adjusts the default
                    route distance accordingly to allow the lower level to
                    weight its forwarding load balancing.</t>

                <t hangText="Overloaded:">Applies to a node advertising
                    `overload` attribute as set. The semantics closely
                follow the meaning of the same attribute
                    in <xref target="RFC1142"/>.</t>
        </list>
    </t>
</section>

            <section title="Topology">

                <t>
                    <figure align="center" anchor="pic-topo-three"
                        title="A two level spine-and-leaf topology">
                        <artwork align="center"><![CDATA[
.                +--------+          +--------+
.                |        |          |        |          ^ N
.                |Spine 21|          |Spine 22|          |
.Level 2         ++-+--+-++          ++-+--+-++        <-*-> E/W
.                 | |  | |            | |  | |           |
.             P111/2|  |P121          | |  | |         S v
.                 ^ ^  ^ ^            | |  | |
.                 | |  | |            | |  | |
.  +--------------+ |  +-----------+  | |  | +---------------+
.  |                |    |         |  | |  |                 |
. South +-----------------------------+ |  |                 ^
.  |    |           |    |         |    |  |              All TIEs
.  0/0  0/0        0/0   +-----------------------------+     |
.  v    v           v              |    |  |           |     |
.  |    |           +-+    +<-0/0----------+           |     |
.  |    |             |    |       |    |              |     |
.+-+----++ optional +-+----++     ++----+-+           ++-----++
.|       | E/W link |       |     |       |           |       |
.|Node111+----------+Node112|     |Node121|           |Node122|
.+-+---+-+          ++----+-+     +-+---+-+           ++---+--+
.  |   |             |   South      |   |              |   |
.  |   +---0/0--->-----+ 0/0        |   +----------------+ |
. 0/0                | |  |         |                  | | |
.  |   +---<-0/0-----+ |  v         |   +--------------+ | |
.  v   |               |  |         |   |                | |
.+-+---+-+          +--+--+-+     +-+---+-+          +---+-+-+
.|       |  (L2L)   |       |     |       |  Level 0 |       |
.|Leaf111~~~~~~~~~~~~Leaf112|     |Leaf121|          |Leaf122|
.+-+-----+          +-+---+-+     +--+--+-+          +-+-----+
.  +                  +    \        /   +              +
.  Prefix111   Prefix112    \      /   Prefix121    Prefix122
.                          multi-homed
.                            Prefix
.+---------- Pod 1 ---------+     +---------- Pod 2 ---------+
                        ]]>
                        </artwork>
                    </figure>

                </t>

                <t>
                    We will use this topology (called commonly a fat
                    tree/network in modern DC considerations
                    <xref target="VAHDAT08"/>
                    as a homonym of the
                    <xref target="FATTREE">original definition of the term</xref>)
                        in all further considerations.
                    It depicts
                    a generic "fat-tree" and the concepts explained in
                    three levels here
                    carry by induction for further levels and higher degrees
                    of connectivity. However, this document will also deal
                    with designs that
                    provide only sparser connectivity.
                </t>

            </section>

        </section>

        <section anchor="reqs" title="Requirement Considerations">
            <t>
                <xref
                    target="RFC7938"></xref> gives the original set of requirements
                augmented here based upon recent experience in the
                operation of fat-tree
                networks.
            </t>
            <t>
                <list style='format REQ%d: ' >
                    <t>The control protocol should discover the physical
                        links automatically
                        and be able to detect cabling that
                        violates fat-tree topology constraints.
                        It must react accordingly to such mis-cabling attempts,
                        at a minimum
                        preventing adjacencies between nodes from being
                        formed and traffic
                        from being forwarded on those mis-cabled links. E.g.
                        connecting a leaf to a spine at level 2 should be
                        detected and ideally prevented.

                    </t>
                    <t>A node without any configuration beside default values
                        should come up at the correct level
                        in any PoD it is introduced into. Optionally,
                        it must be possible to
                        configure nodes to restrict their participation to
                        the PoD(s) targeted at any level.
                    </t>
                    <t>Optionally, the protocol should allow to provision data
                        centers where the
                        individual
                        switches carry no configuration information and are
                        all deriving their
                        level from a "seed". Observe that this requirement
                        may collide with the desire
                        to detect cabling misconfiguration and with that
                        only one of the requirements
                        can be fully met in a chosen configuration mode.
                    </t>

                    <t>
                        The solution should allow for minimum size routing
                        information base and forwarding
                        tables at leaf level for speed, cost and simplicity
                        reasons. Holding an excessive amount of information away
                        from leaf nodes simplifies operation and lowers cost of
                        the underlay.
                    </t>
                    <t>Very high degree of ECMP must be
                        supported. Maximum ECMP is currently understood as the most
                        efficient
                        routing approach to maximize the throughput of switching
                        fabrics <xref target="MAKSIC2013"/>.
                    </t>
                    <t>Non equal cost anycast must be supported to allow for
                        easy and robust multi-homing of services without regressing to
                        careful balancing of link costs.
                        </t>
                    <t>Traffic engineering should be allowed by modification of
                        prefixes and/or their next-hops.
                    </t>

                    <t>The solution should allow for access to link states of
                        the whole topology
                        to enable efficient support for modern control
                        architectures like <xref
                            target="RFC7855">SPRING</xref> or
                        <xref target="RFC4655">PCE</xref>.
                    </t>
                    <t>The solution should easily accommodate opaque data to
                        be carried throughout the topology to subsets of nodes.
                        This can be used
                        for many purposes, one of them being a key-value
                        store that allows
                        bootstrapping of nodes based right at the time of
                        topology discovery.
                    </t>
                    <t>Nodes should be taken out and introduced into production
                        with minimum
                        wait-times and minimum of "shaking" of the network, i.e.
                        radius of propagation (often called "blast radius")
                        of changed information should be as small as feasible.
                    </t>
                    <t>The protocol should allow for maximum aggregation of carried
                        routing information while at the same time automatically
                        de-aggregating
                        the prefixes to prevent black-holing in case of failures.
                        The de-aggregation
                        should support maximum possible ECMP/N-ECMP remaining
                        after failure.
                    </t>
                    <t>Reducing the scope of communication needed throughout
                        the network on link and state
                        failure, as well as reducing advertisements of
                        repeating, idiomatic or policy-guided information in
                        stable state is highly desirable since it leads to
                        better stability and faster convergence behavior.

    </t>
                    <t>Once a packet traverses a link in a "southbound" direction,
                        it must not take any further "northbound"
                        steps along its path to
                        delivery to its destination under normal conditions.
                        Taking a path
                        through the spine in cases where a shorter
                    path is available is highly undesirable. </t>

<t>
    Parallel links between same set of
    nodes must be distinguishable for SPF, failure and traffic engineering
    purposes. </t>

<t> The protocol must not rely on interfaces having
    discernible unique addresses, i.e. it must operate in presence of
    unnumbered links (even parallel ones) or links of a single node
    having same addresses.</t>
<t>It would be desirable to achieve fast re-balancing of flows when links,
    especially towards the spines are lost or provisioned without regressing to
    per flow traffic engineering which introduces a significant amount of complexity
    while possibly not being reactive enough to account for short-lived flows.
    </t>
                </list>
</t>


<t>
            The following list represents possible requirements and requirements under
            discussion:

            </t>

<t>
    <list style='format PEND%d: ' >
        <t>Supporting anything but point-to-point links is
            a non-requirement. Questions remain: for connecting to
            the leaves, is there a case where multipoint is
            desirable?  One could still model it as
            point-to-point links; it seems  there is no need for
            anything more than a NBMA-type construct.
        </t>

        <t>What is the maximum number of leaf prefixes we need to carry?
            Is 500'000 enough?
        </t>

</list>
    </t>

<t>
    Finally, the following are the non-requirements:
</t>
<t>
    <list style='format NONREQ%d: ' >
        <t>Broadcast media support is unnecessary.
            </t>

        <t>Purging is unnecessary given its fragility and complexity and
            today's large memory size on
            even modest switches and routers.
            </t>

        <t>Special support for layer 3 multi-hop adjacencies is not part of
            the protocol specification. Such support
            can be easily provided by using tunneling technologies the same
            way IGPs today are solving the problem.
            </t>

    </list>
</t>

        </section>

        <section title="RIFT: Routing in Fat Trees">

            <t>
                Derived from the above requirements we present a
                detailed outline of a protocol optimized for Routing
                in Fat Trees (RIFT) that in most abstract terms has
                many properties of a modified link-state protocol
                <xref target="RFC2328"></xref><xref
                target="RFC1142"></xref> when "pointing north" and
                path-vector <xref target="RFC4271"></xref> protocol
                when "pointing south". Albeit an unusual combination,
                it does quite naturally exhibit the desirable properties
                we seek.
            </t>


            <section title="Overview">
<t>
    The singular property of RIFT is that it floods northbound  "flat"
    link-state information so that each level understands the full
    topology of levels south of it. In contrast, in the southbound
    direction the protocol operates like a path vector protocol or
    rather a distance vector with implicit split horizon since the
    topology constraints make a diffused computation front propagating
    in all directions unnecessary.
    </t>

<t>To account for the "northern" and the "southern" information split the link
    state database is partitioned into "north representation" and "south representation" TIEs,
    whereas in simplest terms the N-TIEs contain a link state topology description
    of lower levels
    and S-TIEs carry simply default routes. This oversimplified
    view will be refined gradually in following sections while introducing
    protocol procedures aimed to fulfill the described requirements.
    </t>

            </section>

            <section title="Specification">

                <section title="Transport">
                    <t>All protocol elements are carried over UDP. Once QUIC <xref
    target="QUIC"></xref> achieves the desired stability in deployments it may
prove a valuable candidate for TIE transport.
</t>

                    <t>All packet formats are defined in Thrift
                        models in <xref target="schema"/>.</t>

                    <t>Future versions may include a <xref target="PROTOBUF"/>
                        schema.</t>


                </section>

                <section title="Link (Neighbor) Discovery (LIE Exchange)" anchor="LIE">

<t>LIE
    exchange happens over well-known administratively
    locally scoped IPv4 multicast address <xref target="RFC2365"/>
    or link-local multicast scope for IPv6 <xref target="RFC4291"/>
    and SHOULD be sent with a TTL of 1 to prevent RIFT information
    reaching beyond a single L3 next-hop in the topology.  LIEs are
    exchanged over all links running RIFT.
    </t>
                    <t>
                    Unless <xref target="ZTP"/> is used, each node is
                    provisioned with the level at which it
                    is operating and its PoD (or otherwise
                    a default level
                    and "undefined" PoD are assumed; meaning that leafs
                    do not need to be configured at all).  Nodes in
                    the spine are configured with an "undefined" PoD.
                    This information is propagated
                    in the LIEs
                    exchanged.
                    </t>

                    <t>A node tries to form a three way adjacency
                        if and only if (definitions of LEAF_ONLY are found in
                    <xref target="ZTP"/>)
                    </t>

                    <t><list style="numbers">
                        <t>the node is in the same PoD or either the node or
                            the neighbor
                            advertises "undefined" PoD membership (PoD# = 0) AND</t>
                                                <t>the neighboring node is
                                                    running the same MAJOR
                            schema version AND</t>
                        <t anchor="samepod">the neighbor is not member
                            of some PoD while the node
                            has a northbound adjacency already joining another
                            PoD AND</t>
                        <t>the neighboring node uses a valid System ID AND</t>
                        <t>the neighboring node uses a different System ID than the node
                            itself</t>
                        <t>the advertised MTUs match on both sides AND</t>
                        <t>both nodes advertise defined level values AND</t>
                        <t anchor="topmustHAL">[<list style="empty">
                            <t anchor="mustHAL">i) the node is at level 0 and has
                                no three way
                                adjacencies already
                                to nodes with level higher than the neighboring
                                node OR
                            </t>
                            <t>ii) the neighboring node is at level 0 OR</t>
                            <t >iii) both nodes are at level 0 AND both indicate
                                support for
                                <xref target="leaf2leaf"/> OR</t>
                            <t>iv) neither node is at level 0 and the
                                neighboring node is at most one level away
                            </t>
                        </list>].
                        </t>

                    </list>
                    </t>

                    <t>The rule in <xref target="samepod"/> MAY be optionally disregarded
                        by a node if PoD detection is undesirable or has to be
                        avoided.</t>

                    <t>A node configured with "undefined" PoD membership MUST,
                        after building its first northbound adjacency making
                        it a participant in a PoD, advertise that PoD
                        as part of its LIEs.
                        </t>

                    <t>LIEs arriving with a TTL larger than 1 MUST be ignored.</t>

                    <t>A node SHOULD NOT send out LIEs without a defined level
                        in the header but in certain scenarios it may
                        be beneficial for trouble-shooting purposes.</t>

<!--
                    <t>[Alia ???] I'm not certain that the PoD
                    approach is general enough to handle more than a
                    3-level CLOS. What about the following instead?  A
                    node is provisioned with its Level, its
                    number/location in that level, and the
                    nodes-at-level/upper-level and
                    nodes-at-level/lower-level.  (assume node-id
                    counting starts at 1, not 0) For example, say a
                    node is L1-node10 and upper-assignment is 8 and
                    lower-assignment is 16.  Then the node knows to
                    connect to L2-node2 and to accept connectsions
                    from L0-node161 to L0-node176.  For east-west
                    links, that may need another parameter to indicate
                    whether arbitrary east-west links are ok or only
                    links to immediate neighbors or...  Thoughts?
                    </t>
-->

                    <t>LIE exchange uses three-way handshake mechanism
                    <xref target="RFC5303"></xref>. Precise finite state
                    machines will be provided in later versions of this
                    specification. LIE packets
                    contain nonces and may contain an SHA-1 <xref target="RFC6234"/>
                    over nonces and
                    some of the LIE data which prevents corruption and
                    replay attacks. TIE flooding reuses those nonces to prevent
                    mismatches and can use those for security purposes
                    in case it is using QUIC <xref target="QUIC"></xref>.
                    <xref target="security"/> will address the precise security
                        mechanisms in
                        the future.

                    </t>



                </section>

                <section title="Topology Exchange (TIE Exchange)">

                    <section title="Topology Information Elements">
                        <t>Topology and reachability information in RIFT is
                            conveyed by the means of TIEs which have a good
                            amount of commonality with LSAs in OSPF.

</t>
                        <t>TIE exchange
                            mechanism uses
                            port indicated by each node in the LIE
                            exchange and the interface on which the adjacency has been
                            formed as destination. It SHOULD use TTL of 1 as well.
                            </t>

                            <t>TIEs
                            contain sequence numbers, lifetimes and a type.
                            Each type has a large identifying number space
                            and information
                            is spread across possibly many TIEs of a certain
                            type by the means of a hash function that a node
                            or deployment can individually determine. One extreme
                            point of the design space is a prefix per TIE which leads to
                            BGP-like behavior vs. dense packing into few TIEs
                            leading to more traditional IGP trade-off with fewer
                            TIEs. An implementation may even rehash
                            at the cost of significant amount of re-advertisements
                            of TIEs. </t>


                        <t>More information about the TIE structure can be
                            found in the schema in <xref target="schema"/>.
                            </t>

                        </section>

                    <section title="South- and Northbound Representation">
                            <t>As a central concept to RIFT, each node represents
                                itself differently depending on the direction in
                                which it is advertising information.

                                More precisely,
                                a spine node represents two different databases
                                to its neighbors
                                depending on whether it advertises TIEs to the
                                north or to the south/sideways.

                                We call those differing TIE databases
                                either south- or
                                northbound (S-TIEs and N-TIEs)
                                depending on the direction of distribution.
                           </t>

<t> The N-TIEs hold all of the node's adjacencies, local
    prefixes and northbound policy-guided prefixes while the
    S-TIEs hold only all of the node's adjacencies and the
    default prefix with necessary disaggregated prefixes and
    southbound policy-guided prefixes. We will explain this in
    detail further in <xref target="dissagregate"/> and <xref
    target="sec-pgp"/>.
</t>

<t>The TIE types are symmetric in both directions and <xref target="tie-types"/>
    provides a quick reference to the different TIE types including direction
    and their function.
    </t>

<texttable anchor="tie-types"
    title="TIE Types"
    style="all">

    <ttcol>TIE-Type</ttcol><ttcol>Content</ttcol>

    <c>node N-TIE</c>         <c>node properties, adjacencies and information
                                    helping in complex disaggregation scenarios</c>
    <c>node S-TIE</c>        <c>same content as node N-TIE except the information
                                    to help disaggregation</c>
    <c>Prefix N-TIE</c>             <c>contains nodes' directly reachable prefixes</c>
    <c>Prefix S-TIE</c>            <c>contains originated defaults and
                                        de-aggregated prefixes</c>
<c>PGP N-TIE</c>          <c>contains node's north PGPs</c>
<c>PGP S-TIE</c>           <c>contains node's south PGPs</c>
<c>KV  N-TIE</c>            <c>contains node's northbound KVs</c>
<c>KV  S-TIE</c>          <c>contains node's southbound KVs</c>
    </texttable>

<t>As an example illustrating a database holding both
    representations, consider the
    topology in <xref target="pic-topo-three"/> with the optional
    link between node 111 and node 112 (so that the flooding on an
    east-west link can be shown). This example assumes unnumbered
    interfaces.  First, here are the TIEs generated by some
    nodes. For simplicity, the key value elements and the
    PGP elements which may be included in their S-TIEs
    or N-TIEs are not shown.</t>

<figure align="center" anchor="ties-topo-three"
    title="example TIES generated in a 2 level spine-and-leaf topology">
    <artwork align="left"><![CDATA[

        Spine21 S-TIEs:
        Node S-TIE:
          NodeElement(layer=2, neighbors((Node111, layer 1, cost 1),
          (Node112, layer 1, cost 1), (Node121, layer 1, cost 1),
          (Node122, layer 1, cost 1)))
        Prefix S-TIE:
          SouthPrefixesElement(prefixes(0/0, cost 1), (::/0, cost 1))

        Node111 S-TIEs:
        Node S-TIE:
          NodeElement(layer=1, neighbors((Spine21, layer 2, cost 1, links(...)),
          (Spine22, layer 2, cost 1, links(...)),
          (Node112, layer 1, cost 1, links(...)),
          (Leaf111, layer 0, cost 1, links(...)),
          (Leaf112, layer 0, cost 1, links(...))))
        Prefix S-TIE:
          SouthPrefixesElement(prefixes(0/0, cost 1), (::/0, cost 1))

        Node111 N-TIEs:
        Node N-TIE:
          NodeElement(layer=1,
          neighbors((Spine21, layer 2, cost 1, links(...)),
          (Spine22, layer 2, cost 1, links(...)),
          (Node112, layer 1, cost 1, links(...)),
          (Leaf111, layer 0, cost 1, links(...)),
          (Leaf112, layer 0, cost 1, links(...))))
        Prefix N-TIE:
          NorthPrefixesElement(prefixes(Node111.loopback)

        Node121 S-TIEs:
        Node S-TIE:
          NodeElement(layer=1, neighbors((Spine21,layer 2,cost 1),
          (Spine22, layer 2, cost 1), (Leaf121, layer 0, cost 1),
          (Leaf122, layer 0, cost 1)))
        Prefix S-TIE:
          SouthPrefixesElement(prefixes(0/0, cost 1), (::/0, cost 1))

        Node121 N-TIEs:
        Node N-TIE:
          NodeLinkElement(layer=1,
          neighbors((Spine21, layer 2, cost 1, links(...)),
          (Spine22, layer 2, cost 1, links(...)),
          (Leaf121, layer 0, cost 1, links(...)),
          (Leaf122, layer 0, cost 1, links(...))))
        Prefix N-TIE:
          NorthPrefixesElement(prefixes(Node121.loopback)

        Leaf112 N-TIEs:
        Node N-TIE:
          NodeLinkElement(layer=0,
          neighbors((Node111, layer 1, cost 1, links(...)),
          (Node112, layer 1, cost 1, links(...))))
        Prefix N-TIE:
          NorthPrefixesElement(prefixes(Leaf112.loopback, Prefix112,
          Prefix_MH))
    ]]>
    </artwork>
</figure>
                    </section>

<section title="Flooding">
<t>
The  mechanism used to distribute TIEs is the well-known (albeit
modified in several
respects to address fat tree requirements) flooding mechanism used by
today's link-state protocols.
Albeit initially more demanding to implement, it avoids many problems with
the diffused computation
update style used by path vector.
As described before, TIEs themselves are transported over UDP with the
ports indicated in the LIE
    exchanges and using the destination address
    (for unnumbered IPv4 interfaces same considerations
    apply as in equivalent OSPF case)
    on which the LIE adjacency
    has been formed.</t>

<t>On reception of a TIE with an undefined level value in the packet header
    the node SHOULD issue a warning and indiscriminately discard the packet.</t>

<t>Precise finite state
    machines and procedures
    will be provided in later versions of this
    specification.</t>

</section>

                        <section title="TIE Flooding Scopes" anchor="tiescopes">


<t>In a somewhat analogous fashion to link-local, area and domain flooding scopes,
RIFT defines several complex "flooding scopes" depending on the direction and type of TIE
propagated.</t>

<t>Every N-TIE is flooded northbound, providing a node at a given level with the
    complete topology of
    the Clos or Fat Tree network underneath it, including all specific prefixes.
    This means that a packet
    received from a node at the same or lower level whose destination is covered
    by one of those specific
    prefixes may be routed directly towards the node advertising that prefix
    rather than sending
    the packet to a node at a higher level.</t>

<t>A node's node S-TIEs, consisting of all node's adjacencies and prefix S-TIEs
    with default IP
    prefix and disaggregated prefixes, are flooded southbound in order to allow
the nodes one level down to see connectivity of the higher level as well
as reachability to the rest of the fabric.  In
order to allow a E-W disconnected node in
a given level to receive the S-TIEs of other nodes at its level, every *NODE*
S-TIE is "reflected" northbound to the level from which it was
received. It should be noted that east-west links are included in
South TIE flooding;
those TIEs need to be flooded to satisfy algorithms in <xref target="calculate"/>.
In that way nodes at same level can learn about each other
without a lower level, e.g. in case of leaf level.
The precise flooding scopes are given in <xref target="tie-tire-tide-scopes"/>.
Those rules govern
as well what SHOULD be included in TIDEs towards neighbors. East-West flooding scopes are
identical to South flooding scopes.
</t>

<t>Node S-TIE "reflection" allows support of disaggregation on failures described
    in <xref target="dissagregate"/> and flooding reduction in <xref target="reduce"/>.
</t>

<texttable anchor="tie-tire-tide-scopes"
    title="Flooding Scopes"
    style="all">

<ttcol>Packet Type vs. Peer Direction</ttcol> <ttcol>South</ttcol><ttcol>North</ttcol>

    <c>node S-TIE</c>     <c>flood self-originated only</c>
    <c>flood if TIE originator's level is higher
                                                        than own level</c>  
    <c>non-node S-TIE</c>    <c>flood self-originated only</c>
    <c>flood only if TIE originator is equal peer</c>
    <c>all N-TIEs</c>      <c>never flood</c>       <c>flood always</c> 
    <c>TIDE</c>           <c>include TIEs
                             in flooding
                             scope</c>       <c>include TIEs
                                                in flooding
                                                scope</c>   
    <c>TIRE</c>           <c>include all N-TIEs and all peer's
                            self-originated TIEs and
                            all node S-TIEs</c>
                                            <c>include only if TIE
                                                originator is equal peer</c>
                                            
</texttable>

<t>As an example to illustrate these rules, consider using
    the topology in <xref target="pic-topo-three"/>, with the
    optional link between node 111 and node 112, and the
    associated TIEs given in <xref
    target="ties-topo-three"/>. The flooding from particular
    nodes of the TIEs is given in <xref
    target="flooding-topo-three"/>.</t>

<texttable anchor="flooding-topo-three"
    title="Flooding some TIEs from example topology"
    style="full">

    <ttcol>Router floods to</ttcol> <ttcol>Neighbor</ttcol><ttcol>TIEs</ttcol>

    <c>Leaf111</c> <c>Node112</c> <c>Leaf111 N-TIEs, Node111 node S-TIE</c>
    <c>Leaf111</c> <c>Node111</c> <c>Leaf111 N-TIEs, Node112 node S-TIE</c>
    <c></c>  <c></c>  <c></c>
    <c>Node111</c> <c>Leaf111</c> <c>Node111 S-TIEs  </c>
    <c>Node111</c> <c>Leaf112</c> <c>Node111 S-TIEs </c>
    <c>Node111</c> <c>Node112</c> <c>Node111 S-TIEs</c>

    <c>Node111</c> <c>Spine21</c> <c>Node111 N-TIEs,
        Leaf111 N-TIEs, Leaf112 N-TIEs, Spine22 node S-TIE </c>

    <c>Node111</c> <c>Spine22</c> <c>Node111 N-TIEs,
        Leaf111 N-TIEs, Leaf112 N-TIEs, Spine21 node S-TIE </c>
    <c></c>  <c></c>  <c></c>

    <c>... </c><c>...</c><c>...</c>
    <c>Spine21</c> <c>Node111</c> <c>Spine21 S-TIEs</c>
    <c>Spine21</c> <c>Node112</c> <c>Spine21 S-TIEs</c>
    <c>Spine21</c> <c>Node121</c> <c>Spine21 S-TIEs</c>
    <c>Spine21</c> <c>Node122</c> <c>Spine21 S-TIEs</c>

    <c>... </c><c>...</c><c>...</c>

</texttable>


    <!--

     

                                    <t>Flooding northbound floods all TIEs EXCEPT
                                        the S-TIEs of nodes at
                                        the same or lower
                                        levels.

                                        Flooding N-TIEs from lower levels
                                        provides all necessary
                                        information to the nodes at higher
                                        levels. Flooding S-TIEs from the
                                        higher level (based on those rules
                                        it will only the next higher one)
                                        allows a disconnected spine to see the
                                        S-TIEs of other members of its level
                                        given the level below it will reflect
                                        its S-TIEs. Flooding
                                        east-west TIEs from the same level is
                                        necessary in case the
                                        upper level is disconnected from certain
                                        nodes in a level.

                                        Leafs do not need to follow this rule
                                        and can freely flood
                                        TIEs of other leafs northbound.</t>

                                    <t >
                                        Southbound links are where the really
                                        interesting changes
                                        happen since here the link-state becomes
                                        de-facto a
                                        "one-hop distance vector" protocol. A
                                        spine node starts to
                                        send on this link different TIEs than
                                        it uses on
                                        north-
                                        or eastbound links, namely its S-TIEs.
                                        They form an independent
                                        database that represents ONLY the
                                        node's neighbors
                                        and a default IP prefix. Node's S-TIEs
                                        MUST NEVER be flooded
                                        northbound and MUST be simply dropped
                                        on reception on a
                                        southbound link if they do not come
                                        from the node's own
                                        level, i.e. have been reflected by a
                                        lower level.
                                    </t>


     -->

                        </section>


                        <section
                            title="Initial and Periodic Database Synchronization">
                            <t>The initial exchange of RIFT is modeled after
                                ISIS with TIDE being equivalent to CSNP and
                                TIRE playing the role of PSNP. The content of
                                TIDEs and TIREs is governed
                                by <xref target="tie-tire-tide-scopes"/>.
                                </t>
                            </section>

                        <section
                            title="Purging">

<t>
RIFT does not purge information that has been distributed by the
protocol.  Purging mechanisms in other routing protocols have proven
 to be complex and fragile over many years of experience.

                            Abundant amounts of memory are
                                available
                                today even on low-end platforms. The
                                information will age out and all computations
                                will deliver correct results if a node
                                leaves the network due
                                to the new information distributed by its adjacent
                                nodes.
                                </t>
                            <t>Once a RIFT node issues a TIE with an ID, it MUST
                                preserve the ID as long as feasible (also when
                                the protocol restarts), even if the TIE
                                loses
                                all content. The re-advertisement of empty TIE
                                fulfills the purpose of purging any information
                                advertised in previous versions. The originator
                                is free to not re-originate the according empty TIE
                                again or originate an empty TIE with relatively
                                short lifetime to prevent large number of long-lived
                                empty
                                stubs polluting the network.
                                Each node
                                will timeout and clean up the according empty TIEs
                                independently.
                                </t>
                            <t>Upon restart a node MUST, as any link-state
                                implementation, be prepared to receive
                                TIEs with its own system ID and supersede them
                                with equivalent, newly generated, empty TIEs with
                                a higher sequence number. As above, the lifetime
                                can be relatively short since it only needs to
                                exceed the necessary propagation and processing
                                delay by all the nodes that are within the
                                TIE's flooding scope.
                                </t>
                            </section>

<section title="Southbound Default Route Origination" anchor="defaultrouterules">

    <t>Under certain conditions nodes issue a default route in their South Prefix TIEs with
        metrics as computed in <xref target="varydefault"/>.</t>

    <t>A node X that
        <list style='numbers' >

       <t> is NOT overloaded AND</t>
       <t>has southbound or east-west adjacencies</t>
       </list>


        originates in its south prefix TIE such a default
        route IFF

 <list style='numbers' >
        <t>all other nodes at X's level are overloaded OR</t>
        <t>all other nodes at X's level have NO northbound
                adjacencies
                OR</t>
        <t>X has computed reachability to a default
                route during N-SPF.</t>
</list>

        </t>

    <t>The term "all other nodes at X's level" obviously describes
        just the nodes at the same level in the POD with a viable lower layer
        (otherwise the node S-TIEs cannot be reflected and the nodes in e.g.
        POD 1 and POD 2 are "invisible" to each other).
</t>
    
    <t>A node originating a southbound
        default route MUST install a default discard route
        if it did not compute a default route during N-SPF.
    </t>


</section>

                        <section
                            title="Optional Automatic Flooding Reduction and Partitioning"
                            anchor="reduce">

                            <t>Several nodes can, but strictly only
                                under conditions defined below,
                                run a hashing function based on TIE originator
                                value and partition flooding
                                between them.
                            </t>
                            <t>Steps for flooding reduction and partitioning:
                            </t>



                            <t>
                                <list style='numbers' >
                                    <t>select all nodes in the same level
                                        for which
                                            <list style='letters' >
                                                <t>node S-TIEs have been
                                        received AND </t>
                                                <t>which have precisely
                                        the same non-empty sets of respectively
                                        north and
                                        south neighbor
                                        adjacencies AND</t>
                                                <t>have at least one shared
                                                    southern neighbor including
                                                    backlink verification and</t>
                                        <t> support flooding reduction (overload bits
                                        are ignored)</t>
                                        </list>
                                        and then
                                    </t>
                                    <t>run on the chosen set
                                        a hash algorithm using the nodes' flood
                                        priorities and IDs to select
                                        flooding leader and backup
                                        per TIE originator ID, i.e.
                                        each node floods immediately through
                                        to all its necessary neighbors
                                        TIEs that it received with an originator
                                        ID that makes it the flooding leader
                                        or backup
                                        for this originator. The preference
                                        (higher is better) is computed as

XOR(TIE-ORIGINATOR-ID&lt;&lt;1, ~OWN-SYSTEM-ID), where &lt;&lt; is a non-circular shift and
~ is bit-wise NOT.

                                    </t>
                                    <t>In the very unlikely case of hash
                                        collisions on either of the
                                        two nodes with highest values (i.e.
                                        either does
                                        NOT produce unique hashes as compared to
                                        all other hash values), the node running
                                        the election does not attempt to
                                        reduce flooding.
                                        </t>

                                </list>
                            </t>
                            <t>Additional rules for flooding reduction and
                                partitioning:
                                <list style="numbers">
                                    <t>A node always floods its own TIEs
                                    </t>
                                    <t>A node generates TIDEs as usual but when
                                        receiving TIREs with requests for
                                        TIEs for a node for which it is not a
                                        flooding
                                        leader or backup it
                                        ignores such TIREs on first
                                        request only. Normally, the flooding
                                        leader should satisfy the requestor
                                        and with that no
                                        further TIREs for such TIEs will be
                                        generated. Otherwise, the next set
                                        of TIDEs and TIREs will lead to flooding
                                        independent of the
                                        flooding leader status.
                                    </t>
                                    <t>A node receiving a TIE originated by
                                        a node for which it is not a flooding
                                        leader floods such TIEs only when receiving
                                        an out-of-date
                                        TIDE for them, except for the first one.
                                    </t>
                                </list>
                            </t>
                            <t>The mechanism can be implemented optionally in each
                                node. The capability is carried in the node S-TIE
                                (and for symmetry purposes in node
                                N-TIE as well but it serves no purpose there currently).
                                </t>
                            <t>Obviously flooding reduction does NOT apply to
                                self originated TIEs. Observe further that
                                all policy-guided information consists of
                                self-originated TIEs.
                                </t>

                        </section>

                    </section>


<section anchor="sec-pgp" title="Policy-Guided Prefixes">

    <t>In a fat tree, it can be sometimes desirable to guide traffic to
        particular destinations or keep specific flows to certain paths.
        In RIFT, this is done by using policy-guided prefixes with their
        associated communities.  Each community is an abstract value whose
        meaning is determined by configuration.  It is assumed that the
        fabric is under a single administrative control so that the meaning
        and intent of the communities is understood by all the nodes in the
        fabric.  Any node can originate a policy-guided prefix. </t>

    <t>Since RIFT uses distance vector concepts in a southbound
        direction, it is straightforward to add a policy-guided prefix to
        an S-TIE.  For easier troubleshooting, the approach taken in RIFT
        is that a node's southbound policy-guided prefixes are sent in
        its S-TIE and the receiver does inbound filtering based on the
        associated communities (an egress policy is imaginable but
        would lead to different S-TIEs per neighbor possibly which
        is not considered in RIFT protocol procedures).
        A southbound policy-guided prefix can only
        use links in the south direction.  If a PGP S-TIE
        is received on an east-west or northbound link, it
        must be discarded by ingress filtering.</t>

    <t>Conceptually, a southbound policy-guided prefix guides traffic
        from the leaves up to at most the north-most layer.  It is also
        necessary to have northbound policy-guided prefixes to guide
        traffic from the north-most layer down to the appropriate leaves.
        Therefore, RIFT includes northbound policy-guided prefixes in its
        N PGP-TIE and the receiver does inbound filtering based on the
        associated communities. A northbound policy-guided prefix can only
        use links in the northern direction.  If an N PGP TIE
        is received on an east-west or southbound link, it
        must be discarded by ingress
        filtering.</t>

    <t>By separating southbound and northbound policy-guided prefixes
    and requiring that the cost associated with a PGP is strictly
    monotonically increasing at each hop, the path cannot loop.
    Because the costs are strictly increasing, it is not possible to
    have a loop between a northbound PGP and a southbound PGP.  If
    east-west links were to be allowed, then looping could occur and
    issues such as counting to infinity would become an issue to be
    solved.  If complete generality of path is desired - such as
    including east-west links and using both north and south links in
    arbitrary sequence - then a Path Vector protocol or a similar
    solution must be considered.</t>

    <t>If a node has received the same prefix, after ingress filtering,
        as a PGP in an S-TIE and in an N-TIE, then the
        node determines which policy-guided prefix to use based upon the
        advertised cost.</t>

    <t>A policy-guided prefix is always preferred to a regular prefix,
    even if the policy-guided prefix has a larger cost.
    <xref target="schema"/> provides normative indication of prefix preferences.

    </t>

    <t>The set of policy-guided prefixes received in a TIE is subject
        to ingress filtering and then re-originated to be sent out in the
        receiver's appropriate TIE.  Both the ingress filtering and the
        re-origination use the communities associated with the policy-guided
        prefixes to determine the correct behavior. The cost on
        re-advertisement MUST increase in a strictly monotonic fashion.</t>

    <section title="Ingress Filtering">

        <t>When a node X receives a PGP S-TIE or a PGP N-TIE that is originated
            from a node Y which does not have an adjacency with X, all PGPs in
            such a
            TIE MUST be filtered.
            Similarly, if node Y is at the same layer as node X, then X MUST
            filter out PGPs in such S- and N-TIEs to prevent loops.</t>

        <t>Next, policy can be applied to determine which
        policy-guided prefixes to accept.  Since ingress filtering is
        chosen rather than egress filtering and per-neighbor PGPs,
        policy that applies to links is done at the receiver.  Because
        the RIFT adjacency is between nodes and there may be parallel
        links between the two nodes, the policy-guided prefix is
        considered to start with the next-hop set that has all links
        to the originating node Y.
        </t>

        <t>A policy-guided prefix has or is assigned the following
            attributes:

            <list style="hanging">

                <t hangText="cost: "> This is initialized to the cost
                    received</t>

                <t hangText="community_list: "> This is initialized to the
                    list of the communities received.</t>

                <t hangText="next_hop_set: ">This is initialized to the set
                    of links to the originating node Y.</t>

            </list></t>

    </section><!-- ingress filtering -->

    <section title="Applying Policy">

        <t>The specific action to apply based upon a community is
            deployment specific.  Here are some examples of things that can be
            done with communities.  A community is a 64-bit
            number and it can be written as a single field M or as a
            multi-field (S = M[0-31], T = M[32-63]) in these examples.  For
            simplicity, the policy-guided prefix is referred to as P, the
            processing node as X and the originator as Y.</t>

        <t> <list style="hanging">
            <t hangText="Prune Next-Hops: Community Required: "> For each
                next-hop in P.next_hop_set, if the next-hop does not have the
                community, prune that next-hop from P.next_hop_set.</t>

            <t hangText="Prune Next-Hops: Avoid Community: "> For each
                next-hop in P.next_hop_set, if the next-hop has the
                community, prune that next-hop from P.next_hop_set.</t>

            <t hangText="Drop if Community: ">If node X has community M, discard P.</t>

            <t hangText="Drop if not Community: ">If node X does not have
                the community M, discard P.</t>

            <t hangText="Prune to ifIndex T: ">For each next-hop in
                P.next_hop_set, if the next-hop's ifIndex is not the value T
                specified in the community (S,T), then prune that next-hop from
                P.next_hop_set.</t>

            <t hangText="Add Cost T: ">For each appearance of community S in P.community_list,
                if the node X has community S, then add T to P.cost.</t>

            <t hangText="Accumulate Min-BW T: "> Let bw be the sum of the
                bandwidth for P.next_hop_set.  If that sum is less than T, then
                replace (S,T) with (S, bw). </t>

            <t hangText="Add Community T if Node matches S: "> If the node
                X has community S, then add community T to P.community_list.</t>

        </list></t>

    </section><!-- applying policy -->

    <section anchor="sec_store_pgp" 
       title="Store Policy-Guided Prefix for Route Computation and Regeneration">

        <t>Once a policy-guided prefix has completed ingress filtering
        and policy, it is almost ready to store and use.  It is still
        necessary to adjust the cost of the prefix to account for the
        link from the computing node X to the originating neighbor
        node Y.</t>

        <t>There are three different policies that can be used:

            <list style="hanging">
                <t hangText="Minimum Equal-Cost: "> Find the lowest cost C
                    next-hops in P.next_hop_set and prune to those.  Add C to P.cost.</t>

                <t hangText="Minimum Unequal-Cost: "> Find the lowest cost C
                    next-hop in P.next_hop_set.  Add C to P.cost.</t>

                <t hangText="Maximum Unequal-Cost: "> Find the highest cost C
                    next-hop in P.next_hop_set.  Add C to P.cost.</t>
            </list></t>

        <t>The default policy is Minimum Unequal-Cost but well-known
            communities can be defined to get the other behaviors.</t>

        <t>Regardless of the policy used, a node MUST store a PGP cost
        that is at least 1 greater than the PGP cost received.  This
        enforces the strictly monotonically increasing condition that
        avoids loops.</t>

        <t>Two databases of PGPs - from N-TIEs and from
            S-TIEs are stored.  When a PGP is inserted into the
            appropriate database, the usual tie-breaking on cost is performed.
            Observe that the node retains all PGP TIEs due to normal
            flooding behavior and hence loss of the best prefix will
            lead to re-evaluation of TIEs present and re-advertisement
            of a new best PGP.</t>
        
    </section><!-- store pgps -->
    
    <section title="Re-origination">
        
        <t> A node must re-originate policy-guided prefixes and retransmit them.
            The node has its database of southbound policy-guided prefixes to
            send in its S-TIE and its database of northbound policy-guided
            prefixes to send in its N-TIE.</t>
        
        <t>Of course, a leaf does not need to re-originate southbound
            policy-guided prefixes.</t>

        
    </section><!-- re-origination -->

    <section title="Overlap with Disaggregated Prefixes">
        <t>PGPs may overlap with prefixes introduced by automatic de-aggregation.
            The topic is under further discussion. The break in
            connectivity that leads to infeasibility of a PGP is mirrored in
            adjacency tear-down and according removal of such PGPs. Nevertheless,
            the underlying link-state flooding will be likely reacting
            significantly
            faster than a hop-by-hop redistribution and with that the preference
            for PGPs may cause intermittent black-holes.
            </t>
        </section>

</section><!-- policy-guided prefixes -->


<section title="Reachability Computation" anchor="calculate">

    <t>A node has three sources of relevant information.  A node knows
        the full topology south from the received N-TIEs.  A node has the
        set of prefixes with associated distances and bandwidths from
        received S-TIEs.  A node can also have a set of PGPs.</t>

    <t>To compute reachability, a node runs conceptually a northbound
        and a southbound
        SPF.
        We call that N-SPF and S-SPF.
    </t>

    <t>Since neither computation can "loop" (with due considerations given
        to PGPs), it is
        possible to compute non-equal-cost or even
        <xref target="EPPSTEIN">k-shortest paths</xref>
        and "saturate" the fabric
        to the extent desired.
    </t>

    <section anchor="nspf" title="Northbound SPF">

        <t> N-SPF uses northbound and east-west adjacencies in North Node TIEs
            when progressing Dijkstra. Observe that this is really just
            a one hop variety since South Node TIEs are not re-flooded southbound
            beyond
            a single level (or east-west) and
            with that the computation cannot progress beyond adjacent nodes.
            </t>

        <t>Default route found when crossing an E-W link is used IFF

<list style="numbers">
             <t>the node itself does NOT have any northbound adjacencies AND</t>
            <t>the adjacent node has one or more northbound adjacencies</t>
            </list>

            This rule forms
            a "one-hop default route split-horizon" and prevents looping
            over default routes
            while allowing for "one-hop protection" of nodes that lost
            all northbound
            adjacencies.

</t>
        <t>Other south prefixes found when crossing E-W link MAY be used IFF
            <list style="numbers">

            <t>no
            north neighbors are advertising same or supersuming non-default
                prefix AND </t>
            <t>the node does not originate a non-default supersuming prefix
                itself.</t>


            </list>


    i.e. the
    E-W link can be used as the gateway of last resort for a specific prefix only.
    Using south prefixes across E-W link can be beneficial e.g.
    on automatic de-aggregation
    in pathological fabric partitioning scenarios.

            </t>


        <t>
            A detailed example can be found in <xref target="onastickexample"/>.

        </t>


        <t>For N-SPF we are using the South Node TIEs to
            find according adjacencies to verify backlink connectivity.
            Just as in case of IS-IS or OSPF, two unidirectional links are
            associated
            together to confirm bidirectional connectivity.
        </t>

    </section>


    <section anchor="sspf" title="Southbound SPF">

        <t> S-SPF uses only the
            southbound adjacencies in the south node TIEs,
            i.e. progresses towards nodes at lower levels. Observe that
            E-W adjacencies are NEVER used in the computation. This enforces the
            requirement that a packet traversing in a southbound direction must
            never change its direction.</t>
        <t>S-SPF uses northbound adjacencies in north node TIEs to verify backlink
            connectivity. </t>

    </section>

    <section anchor="ringspf" title="East-West Forwarding Within a Level">

        <t>Ultimately,  it should be observed that in presence of a "ring" of
            E-W links in a level
            neither SPF will provide a "ring protection"
            scheme since such a computation would have to deal necessarily
            with breaking of "loops" in generic Dijkstra sense;
            an application for which
            RIFT is not intended. It is outside the scope of this document
            how an underlay
            can be used to provide a full-mesh connectivity between nodes
            in the same layer
            that would allow for N-SPF to provide protection for a single
            node losing
            all its northbound adjacencies (as long as any of the other
            nodes in the level
            are northbound connected).
        </t>

        <t>Using south prefixes over horizontal links is optional and can
            protect against pathological fabric partitioning cases that
            leave only paths to destinations that would necessitate multiple
            changes of forwarding direction between north and south.
            </t>

    </section>


</section>


<section anchor="sec_attaching_prefixes" title="Attaching Prefixes">

    <t>After the SPF is run, it is necessary to attach according prefixes.
        For S-SPF, prefixes from an N-TIE are attached to the originating node with
        that node's next-hop set and a distance equal to the prefix's cost
        plus the node's minimized path distance.  The RIFT route database, a
        set of (prefix, type=spf, path_distance, next-hop set), accumulates
        these results. Obviously, the prefix retains its type which is used
    to tie-break between the same prefix advertised with different types.</t>

    <t>In case of N-SPF prefixes from each S-TIE need to also be added to the RIFT
    route database.  The N-SPF is really just a stub so the
    computing node needs simply to determine, for each prefix in an S-TIE
    that originated from adjacent node, what next-hops to use to reach
    that node.  Since there may be parallel links, the next-hops to
    use can be a set; presence of the computing node in the associated
    Node S-TIE is sufficient to verify that at least one link has
    bidirectional connectivity.  The set of minimum cost next-hops
    from the computing node X to the originating adjacent node is determined. </t>

    <t>Each prefix has its cost adjusted before being added into the
    RIFT route database.  The cost of the prefix is set to the cost
    received plus the cost of the minimum cost next-hop to that
    neighbor.  Then each prefix can be added into the RIFT route
    database with the next_hop_set; ties are broken based upon
    type first and then distance. RIFT route preferences are normalized
    by the according thrift model type.</t>

    <t>An exemplary implementation for node X follows:

     <figure align="center" anchor="algo-attach-S-TIE-prefixes"
            title="Adding Routes from S-TIE Prefixes">
      <artwork align="left"><![CDATA[

  for each S-TIE
     if S-TIE.layer > X.layer
        next_hop_set = set of minimum cost links to the S-TIE.originator
        next_hop_cost = minimum cost link to S-TIE.originator
        end if
     for each prefix P in the S-TIE
        P.cost = P.cost + next_hop_cost
        if P not in route_database:
          add (P, type=DistVector, P.cost, next_hop_set) to route_database
          end if
        if (P in route_database) and 
             (route_database[P].type is not PolicyGuided):
          if route_database[P].cost > P.cost:
            update route_database[P] with (P, DistVector, P.cost, next_hop_set)
          else if route_database[P].cost == P.cost
            update route_database[P] with (P, DistVector, P.cost, 
               merge(next_hop_set, route_database[P].next_hop_set))
          else
            // Not preferred route so ignore
            end if
          end if
        end for
     end for
 ]]>
     </artwork>
    </figure>

    </t>

</section><!-- attaching prefixes -->

<section anchor="sec_attaching_pgps" title="Attaching Policy-Guided Prefixes">

    <t>Each policy-guided prefix P has its cost and next_hop_set
    already stored in the associated database, as specified in <xref
    target="sec_store_pgp"/>; the cost stored for the PGP is already
    updated to consider the cost of the link to the advertising
    neighbor.  By definition, a policy-guided prefix is preferred to
    a regular prefix. </t>

    <figure align="center" anchor="algo-attach-pgps"
         title="Adding Routes from Policy-Guided Prefixes">
     <artwork align="left"><![CDATA[

    for each policy-guided prefix P:
      if P not in route_database:
         add (P, type=PolicyGuided, P.cost, next_hop_set)
         end if
      if P in route_database :
          if (route_database[P].type is not PolicyGuided) or
             (route_database[P].cost > P.cost):
            update route_database[P] with (P, PolicyGuided, P.cost, next_hop_set)
          else if route_database[P].cost == P.cost
            update route_database[P] with (P, PolicyGuided, P.cost, 
               merge(next_hop_set, route_database[P].next_hop_set))
          else
            // Not preferred route so ignore
            end if
          end if
      end for
 ]]>
     </artwork>
    </figure>
    
</section><!-- attaching policy-guided prefixes -->

<section title=	"Automatic Disaggregation on Link &amp; Node Failures"
    anchor="dissagregate">

    <t>Under normal circumstances, node's S-TIEs contain
        just the adjacencies, a
        default route and policy-guided prefixes.

        However, if a node detects that its default IP
        prefix covers one or more prefixes that are reachable
        through it but not through one or
        more other nodes at the same level, then it MUST
        explicitly advertise those prefixes in an
        S-TIE.  Otherwise, some percentage of the northbound
        traffic for those prefixes would
        be sent to nodes without according reachability,
        causing it to be black-holed.
        Even when not black-holing, the resulting forwarding
        could
        'backhaul' packets through the higher level spines,
        clearly an undesirable condition affecting
        the blocking probabilities of the fabric.

    </t>
    <t>We refer to the process of advertising additional prefixes
        as 'de-aggregation' or 'dis-aggregation'.
    </t>

    <t>
        A node determines the set of prefixes needing de-aggregation
        using the following steps:

        <list style="numbers">

            <t>A DAG computation in the southern
                direction is performed first, i.e. the
                N-TIEs are used to find all of prefixes
                it can reach and the set of next-hops in
                the lower level
                for each.
                Such a computation can be
                easily performed on a fat tree by
                e.g. setting all link costs in the
                southern direction to 1 and all
                northern directions to infinity.  We
                term the set of those prefixes |R, and for each prefix, r,
                in |R, we define
                its set of next-hops to be |H(r).
                Observe that policy-guided prefixes are NOT affected
                since their scope is controlled by
                configuration.

            </t>

            <t> The node uses reflected S-TIEs to find all nodes
                at the same level in the same PoD and the set of southbound
                adjacencies
                for each.  The set of nodes at the same level is termed |N and for each
                node, n, in |N, we define
                its set of southbound adjacencies to be |A(n).
            </t>

            <t>For a given r, if the intersection
                of |H(r) and |A(n), for any n, is null
                then that prefix r must be
                explicitly advertised by the node
                in an S-TIE.

                <!-- The set of reachable prefixes
                 advertised in N-TIEs for which the set
                 of possible next-hops is disjoint with
                 any of the sets of adjacencies reachable by
                 the other nodes are the disaggregated
                 prefixes.  More formally, the set
                 consists of all r in |R such that |H
                 of r is disjoint to |A for any N. -->
            </t>

            <t>Identical set of de-aggregated prefixes is flooded on each of the
                node's southbound
                adjacencies.  In accordance with the normal flooding rules for an S-TIE,
                a node at the lower level that
                receives this S-TIE will not propagate it south-bound. Neither is it
                necessary for the receiving node to
                reflect the disaggregated prefixes
                back over its adjacencies to nodes at the level from which
                it was received.
            </t>

        </list>

    </t>

    <t>To summarize the above in simplest terms: if a node detects that its
        default route encompasses
        prefixes for which one of the other nodes in its level has no
        possible next-hops in the level below,
        it has to disaggregate it to prevent black-holing or suboptimal
        routing. Hence
        a node X needs to determine if it can
        reach a different set of south neighbors than other nodes at the
        same level, which are connected to it via at least one common
        south or east-west
        neighbor.  If it can, then prefix disaggregation may be required.
        If it can't, then no prefix disaggregation is needed.
        An example of disaggregation is provided in
        <xref target="fabriccut"/>.
    </t>

    <t>A possible
        algorithm is described last:</t>
    <t>
        <list style="numbers">
            <t>Create partial_neighbors = (empty), a set of neighbors with
                partial connectivity to the node X's layer from X's perspective.
                Each entry is a list of south neighbor of X and a list of nodes
                of X.layer that can't reach that neighbor.</t>

            <t>A node X determines its set of southbound neighbors
                X.south_neighbors.</t>

            <t>For each S-TIE originated from a node Y that X has which is
                at X.layer, if Y.south_neighbors is not the same as
                X.south_neighbors but the nodes share at least one
                southern neighbor, for each neighbor N in X.south_neighbors but
                not in Y.south_neighbors, add (N, (Y)) to partial_neighbors if N
                isn't there or add Y to the list for N.</t>

            <t>If partial_neighbors is empty, then node X does not need to
                disaggregate any prefixes.  If node X is advertising disaggregated
                prefixes in its S-TIE, X SHOULD remove them and re-advertise its
                according
                S-TIEs.</t>
        </list></t>

    <t>A node X computes its SPF based upon the received N-TIEs.  This
        results in a set of routes, each categorized by (prefix,
        path_distance, next-hop-set).  Alternately, for clarity in the
        following procedure, these can be organized by next-hop-set as (
        (next-hops), {(prefix, path_distance)}).  If partial_neighbors isn't
        empty, then the following procedure describes how to identify
        prefixes to disaggregate.</t>


    <!--
     <t>It is worth to observe here that
     this procedure only disaggregates prefixes when there
     is a same-level node with no connectivity to any of the next-hop south
     neighbors.  This obviously ignores concerns about load-balancing; one
     could also decide to advertise a disaggregated prefixes whenever a
     same-level node lacks connectivity to at least one next-hop.  To do
     that, the algorithm would have to advertise the aggregate link bandwidth across
     all of a node's next-hops.  Then the receiving node could accumulate
     the disaggregated prefixes and merge those with the same path_distance
     but do load-balancing among its next-hops based upon the bandwidth
     indicated.  This has a trade-off of adding more flooding - prefixes
     would be disaggregated based on a single failure instead of when
     connectivity is lost - but should give better load-balancing.  Of
     course, instead of aggregate link bandwidth, one could use link count,
     assuming all links in the fabric have the same bandwidth.</t>

     -->

    <figure align="center" anchor="algo-disaggregated-prefixes"
        title="Computation to Disaggregate Prefixes">
        <artwork align="left"><![CDATA[

            disaggregated_prefixes = {empty }
            nodes_same_layer = { empty }
            for each S-TIE
              if (S-TIE.layer == X.layer and
                  S-TIE.originator shares at least one S-neighbor with X)
                add S-TIE.originator to nodes_same_layer
                end if
              end for

            for each next-hop-set NHS
              isolated_nodes = nodes_same_layer
              for each NH in NHS
                if NH in partial_neighbors
                  isolated_nodes = intersection(isolated_nodes,
                                                partial_neighbors[NH].nodes)
                  end if
                end for

              if isolated_nodes is not empty
                for each prefix using NHS
                  add (prefix, distance) to disaggregated_prefixes
                  end for
                end if
              end for

            copy disaggregated_prefixes to X's S-TIE
            if X's S-TIE is different
              schedule S-TIE for flooding
              end if
        ]]>
        </artwork>
    </figure>


    <t>Each disaggregated prefix is sent with the accurate path_distance.
        This allows a node to send the same S-TIE to each south neighbor.
        The south neighbor which is connected to that prefix will thus have a
        shorter path.</t>


    <t>Finally, to summarize the less obvious points partially omitted in the
        algorithms to keep them more tractable:
        <list style="numbers">
            <t>all neighbor relationships MUST perform backlink checks.
                </t>
            <t>overload bits
            as introduced in <xref target="overload"/> have to
            be respected during the computation.
            </t>

            <t>all the lower level nodes are flooded the same disaggregated
                prefixes since we don't want to build an S-TIE per node and
                complicate things unnecessarily. The PoD containing the prefix
                will prefer southbound anyway.</t>
            <t>disaggregated prefixes
                do NOT have to propagate to lower levels. With that the
                disturbance in terms of new flooding is contained to a single
                level experiencing failures only.</t>
            <t>disaggregated prefix S-TIEs are not "reflected" by the
                lower layer, i.e.
                nodes within same level do NOT need to be aware which node
                computed the need for disaggregation.
            </t>
            <t> The fabric is still
                supporting maximum load balancing properties while not trying
                to send traffic northbound unless
                necessary. </t>
        </list>
    </t>

    <t>Ultimately, complex partitions of superspine on sparsely connected
        fabrics can lead to necessity of transitive disaggregation through
        multiple layers. The topic will be described and standardized in
        later versions of this document.</t>
</section>

<section title="Optional Autoconfiguration" anchor="ZTP">

    <t>
        Each RIFT node can optionally operate in zero touch
        provisioning (ZTP)
        mode, i.e. it has no configuration (unless it is a superspine
        at the top of the topology
        or it MUST operate as leaf and/or support leaf-2-leaf procedures)
        and it will fully configure itself after being
        attached to the
        topology. Configured nodes and nodes operating in ZTP can be
        mixed and will form a valid topology if achievable.
        This section describes the necessary concepts and procedures.
    </t>

    <section title="Terminology" anchor="ZTPTerminology">

        <t>

            <list style='hanging'>

                <t hangText="Automatic Level Derivation:">Procedures which
                    allow nodes without level configured to derive it
                    automatically. Only applied if CONFIGURED_LEVEL is
                    undefined.</t>

                <t hangText="UNDEFINED_LEVEL:">An imaginary value that
                    indicates that the level has not been determined and has
                    not been configured. Schemas normally indicate that
                    by a missing optional value without an
                    available defined default.</t>

                <t hangText="LEAF_ONLY:">An optional configuration
                    flag that can
                    be configured on a node to make sure it never leaves the
                    "bottom of the hierarchy". SUPERSPINE_FLAG and
                    CONFIGURED_LEVEL
                    cannot be defined at the same time as this flag.
                    It implies
                    CONFIGURED_LEVEL value of 0.
                </t>

                <t hangText="CONFIGURED_LEVEL:">A level value
                    provided manually. When this is defined (i.e. it is not
                    an UNDEFINED_LEVEL)
                    the node is
                    not participating in ZTP. SUPERSPINE_FLAG
                    is ignored when this value is defined. LEAF_ONLY
                    can be set only if this value is undefined or set to 0.</t>

                <t hangText="DERIVED_LEVEL:">Level value computed via
                    automatic level derivation when
                    CONFIGURED_LEVEL is equal to
                    UNDEFINED_LEVEL.
                </t>

                <t hangText="LEAF_2_LEAF:">An optional  flag that
                    can
                    be configured on a node to make sure it supports procedures
                    defined in
                    <xref target="leaf2leaf"/>. SUPERSPINE_FLAG is ignored
                    when set at the same time
                    as this flag. LEAF_2_LEAF implies LEAF_ONLY and the according
                    restrictions.</t>

                <t hangText="LEVEL_VALUE:">In ZTP case the original
                    definition of
                    "level" in <xref target="glossary"/> is
                    both extended and relaxed. First, level is defined
                    now as LEVEL_VALUE and is the first defined value of
                    CONFIGURED_LEVEL followed by DERIVED_LEVEL. Second,
                    it is possible for nodes to be more
                    than one level apart to form adjacencies if any of the
                    nodes is at least LEAF_ONLY.</t>

                <t hangText="Valid Offered Level (VOL):">A neighbor's level
                    received
                    on a valid LIE (i.e. passing all checks for adjacency
                    formation while disregarding all clauses involving level
                    values)
                   persisting for the duration of the holdtime interval on the
                   LIE. Observe that offers from nodes offering level value
                   of 0 do not constitute VOLs (since no valid DERIVED_LEVEL
                   can be obtained from those). Offers from LIEs with
                   `not_a_ztp_offer` being true are not VOLs either.
                </t>

                <t hangText="Highest Available Level (HAL):">Highest defined
                    level value seen from all VOLs received.
                </t>

                <t hangText="Highest Adjacency Three Way (HAT):">Highest
                    neighbor level of all the formed three way adjacencies
                    for the node.</t>

                <t hangText="SUPERSPINE_FLAG:">Configuration flag provided to
                    all superspines. LEAF_ONLY and CONFIGURED_LEVEL
                    cannot be defined at the same time as this flag.
                    It implies
                    CONFIGURED_LEVEL value of 16. In fact, it is basically a
                    shortcut for configuring same level at all superspine
                    nodes which is unavoidable since an initial 'seed' is
                    needed for
                    other ZTP nodes to derive their level in the topology.
                </t>
                
            </list>
        </t>
        
    </section> <!-- ZTPTerminology -->

<section title="Automatic SystemID Selection">

    <t>RIFT identifies each node via a SystemID which is a 64 bits wide integer.
        It is relatively simple to derive a, for all practical purposes
        collision free, value for each node on startup. As simple examples
        either system MAC and two random bytes can be used or an IPv4/IPv6
        router ID interface address recycled as System ID. The router MUST
        ensure that such
        identifier is not changing very frequently (at least not without
        sending all its TIEs with fairly short lifetimes) since otherwise the
        network may be left with large amounts of stale TIEs in other nodes
        (though this is not necessarily a serious problem if the procedures
        suggested
        in <xref target="security"/> are implemented).
    </t>

</section>

<section title="Generic Fabric Example">

<t>ZTP forces us to think about miscabled or unusually cabled fabric and
how such a topology can be forced into a "lattice" structure which
a fabric
represents (with further restrictions). Let us consider a necessary and
sufficient physical cabling in
<xref target="pic-ztp-generic"/>. We assume all nodes being in
the same PoD.</t>

<figure align="center" anchor="pic-ztp-generic"
    title="Generic ZTP Cabling Considerations">
    <artwork align="center"><![CDATA[
.        +---+
.        | A |                      s   = SUPERSPINE_FLAG
.        | S |                      l   = LEAF_ONLY
.        ++-++                      l2l = LEAF_2_LEAF
.         | |
.      +--+ +--+
.      |       |
.   +--++     ++--+
.   | E |     | F |
.   |   +-+   |   +-----------+
.   ++--+ |   ++-++           |
.    |    |    | |            |
.    | +-------+ |            |
.    | |  |      |            |
.    | |  +----+ |            |
.    | |       | |            |
.   ++-++     ++-++           |
.   | I +-----+ J |           |
.   |   |     |   +-+         |
.   ++-++     +--++ |         |
.    | |         |  |         |
.    +---------+ |  +------+  |
.      |       | |         |  |
.      +-----------------+ |  |
.              | |       | |  |
.             ++-++     ++-++ |
.             | X +-----+ Y +-+
.             |l2l|     | l |
.             +---+     +---+
    ]]>
    </artwork>
</figure>

<t>First, we need to anchor the "top" of the cabling and that's what
    the SUPERSPINE_FLAG at node A is for. Then things look smooth until
    we have to decide whether node Y is at the same level as X or at
    the same level as I, J and consequently, X is south of it. This is
    unresolvable here until we
    "nail down the bottom" of the topology. To achieve that we use
    the leaf flags.
    We will see further then whether Y chooses to form adjacencies to F or I, J
    successively.
    </t>

</section>

<section title="Level Determination Procedure" anchor="LDP">
    <t>A node starting up with UNDEFINED_LEVEL (i.e. without a
        CONFIGURED_LEVEL or any leaf or superspine flag) MUST follow those
        additional procedures:</t>

    <t>
        <list style="numbers">
            <t>It advertises its LEVEL_VALUE on all LIEs (observe that this
                can be
                UNDEFINED_LEVEL which in terms of the schema is simply an
                omitted optional value).
            </t>
            <t>It chooses on an ongoing basis from all VOLs
                the value of MAX(HAL-1,0) as its DERIVED_LEVEL.
                The node then starts
                to advertise
                this derived level.
            </t>
            <t>A node that lost all adjacencies with HAL value
                MUST  hold
                down computation of new DERIVED_LEVEL for a short period
                of time unless it has no VOLs from southbound adjacencies.
                After the holddown expired, it MUST discard
            all received offers, recompute DERIVED_LEVEL and announce
            it to all neighbors.</t>

            <t>A node MUST reset any adjacency that has changed the level it
                is offering and is in
                three way state.</t>
            <t>A node that changed its defined level value MUST
                readvertise its own TIEs (since the new `PacketHeader` will
                contain a different level than before). Sequence number of each
                TIE MUST be increased.
                </t>
            <t>After a level has been derived the node MUST set
                the `not_a_ztp_offer` on LIEs towards all systems
                extending a VOL for HAL.
                </t>
        </list>
    </t>

    <t>A node starting with LEVEL_VALUE being 0 (i.e. it assumes a leaf
        function or has a CONFIGURED_LEVEL of 0) MUST follow those
        additional procedures:</t>
    <t>
        <list style="numbers">
            <t>It computes HAT per procedures above but does NOT
                use it to compute DERIVED_LEVEL. HAT is used to limit
                adjacency formation per <xref target="LIE"/>.</t>
        </list>
    </t>

    <t>Precise finite state
        machines will be provided in later versions of this
        specification.</t>
    
</section>

<section title="Resulting Topologies">
    <t>The procedures defined in <xref target="LDP"/> will lead to the
         RIFT topology and levels depicted in <xref target="pic-ztp-ldped"/>.</t>

    <figure align="center" anchor="pic-ztp-ldped"
        title="Generic ZTP Topology Autoconfigured">
        <artwork align="center"><![CDATA[
.        +---+
.        | As|
.        | 64|
.        ++-++
.         | |
.      +--+ +--+
.      |       |
.   +--++     ++--+
.   | E |     | F |
.   | 63+-+   | 63+-----------+
.   ++--+ |   ++-++           |
.    |    |    | |            |
.    | +-------+ |            |
.    | |  |      |            |
.    | |  +----+ |            |
.    | |       | |            |
.   ++-++     ++-++           |
.   | I +-----+ J |           |
.   | 62|     | 62|           |
.   ++--+     +--++           |
.    |           |            |
.    +---------+ |            |
.              | |            |
.             ++-++     +---+ |
.             | X |     | Y +-+
.             | 0 |     | 0 |
.             +---+     +---+
        ]]>
        </artwork>
    </figure>

<t>
In case we imagine the LEAF_ONLY restriction on Y is removed the outcome
would be very different however and result in <xref target="pic-ztp-ldped-nol"/>.
This demonstrates basically that auto configuration prevents miscabling
detection and with that can lead to undesirable effects when leafs are not
"nailed" and arbitrarily cabled.
</t>

<figure align="center" anchor="pic-ztp-ldped-nol"
    title="Generic ZTP Topology Autoconfigured">
    <artwork align="center"><![CDATA[
.        +---+
.        | As|
.        | 64|
.        ++-++
.         | |
.      +--+ +--+
.      |       |
.   +--++     ++--+
.   | E |     | F |
.   | 63+-+   | 63+-------+
.   ++--+ |   ++-++       |
.    |    |    | |        |
.    | +-------+ |        |
.    | |  |      |        |
.    | |  +----+ |        |
.    | |       | |        |
.   ++-++     ++-++     +-+-+
.   | I +-----+ J +-----+ Y |
.   | 62|     | 62|     | 62|
.   ++-++     +--++     ++-++
.    | |         |       | |
.    | +-----------------+ |
.    |           |         |
.    +---------+ |         |
.              | |         |
.             ++-++        |
.             | X +--------+
.             | 0 |
.             +---+

    ]]>
    </artwork>
</figure>


    </section>

</section> <!-- ZTP -->

<section title="Stability Considerations">
    <t>The autoconfiguration mechanism computes a global maximum of levels
        by diffusion. The achieved equilibrium can be disturbed massively by
        all nodes with highest level either leaving or entering the domain (with
        some finer distinctions not explained further).
        It is therefore recommended that each node is multi-homed towards
        nodes with respective HAL offerings. Fortunately,
        this is the natural state of things for
        the topology variants considered in RIFT.
        </t>

    </section>

</section>

        <section title="Further Mechanisms">
            <section title="Overload Bit" anchor="overload">

                <t>Overload Bit MUST be respected in all according
                    reachability computations. A node with overload
                    bit set SHOULD NOT advertise any reachability
                    prefixes southbound except locally hosted ones.
                </t>

                <t>The leaf node SHOULD set the 'overload' bit
                    on its node TIEs, since if the spine nodes were
                    to forward traffic not meant for the local
                    node, the leaf node does not have the topology
                    information to prevent a routing/forwarding
                    loop.

                </t>
            </section>


<section title="Optimized Route Computation on Leafs">

    <t>Since the leafs do see only "one hop away" they do not need to
        run a full SPF but can simply gather prefix candidates from their
        neighbors and build the according routing table.
    </t>

    <t>A leaf will have no N-TIEs except its own and
        optionally from its east-west
        neighbors.  A leaf will have S-TIEs from its neighbors.
        </t>
    <t>Instead of creating a network graph from its N-TIEs and
        neighbor's S-TIEs and then running
        an SPF, a leaf node can simply compute the minimum cost and
        next_hop_set to each leaf neighbor by examining its local
        interfaces, determining bi-directionality from the associated
        N-TIE, and specifying the neighbor's next_hop_set set and cost
        from the minimum cost local interfaces to that neighbor.</t>

    <t>Then a leaf attaches prefixes as in <xref
        target="sec_attaching_prefixes"/> as well as the policy-guided
        prefixes as in <xref target="sec_attaching_pgps"/>.</t>
</section>


<section title="Key/Value Store">

        <section title="Southbound">

        <t>
        The protocol supports a southbound distribution of key-value pairs that
        can be used to e.g. distribute configuration information during topology
        bring-up. The KV S-TIEs can arrive from multiple nodes
        and hence need tie-breaking per key. We use the following rules
        </t>
        <t>
    <list style="numbers">
        <t>Only KV TIEs originated by a node to which the receiver has an adjacency are
            considered.</t>
        <t>Within all valid KV S-TIEs containing the key, the value of the KV S-TIE for
            which the according node S-TIE is present, has the
            highest level and within the same level has
            highest originator ID is preferred. If keys in the most preferred
            TIEs are overlapping, the behavior is undefined.
            </t>

        </list>

    </t>

        <t>Observe that if a node goes down, the node south of it loses adjacencies
            to it and with that the KVs will be disregarded and on tie-break changes
            new KV re-advertised to prevent stale information
            being used by nodes further south. KV information in southbound
            direction is not result of
            independent computation of every node but a diffused computation.
            </t>

        </section>

        <section title="Northbound">
            <t>Certain use cases seem to necessitate distribution of essentially
                KV information that is generated in the leafs in the northbound
                direction. Such information is flooded in KV N-TIEs.
                Since the originator of northbound KV is preserved during
                northbound flooding, overlapping keys could be used. However,
                to omit further protocol complexity, only the value of the key
            in TIE tie-broken in same fashion as southbound KV TIEs is used.</t>
            </section>
</section>

<section title="Interactions with BFD" anchor="bfd">
    <t>RIFT MAY incorporate <xref target="RFC5881">BFD</xref> to react quickly
        to link failures. In such case following procedures
        are introduced: </t>

    <t>
        <list>

            <t>After RIFT 3-way hello adjacency convergence
                a BFD session MAY be formed automatically
                between the RIFT endpoints without further configuration.</t>
            <t>In case RIFT loses 3-way hello adjacency, the BFD session should be
                brought down until 3-way adjacency is formed again.</t>
            <t>In case established BFD session goes Down after it was Up, RIFT
                adjacency should be re-initialized from scratch.</t>
            <t>In case of parallel
                links between nodes each link may run its own independent BFD session.
            </t>
            <t>In case RIFT changes link identifiers both the hello as well as the BFD
                sessions will be brought down and back up again.</t>
        </list>
    </t>
</section>


<section title="Fabric Bandwidth Balancing">

<t>A well understood problem in fabrics is that in case of link losses
    it would be ideal to rebalance how much traffic is offered to
    switches in the next layer
    based on the ingress and egress bandwidth they have. Current attempts
    rely mostly on specialized traffic engineering via controller or leafs being
    aware of complete topology with according cost and complexity.
</t>
<t>RIFT presents a very light weight mechanism that can deal with the problem
    in an approximative way based on the fact that RIFT is loop-free.
    </t>

<section title="Northbound Direction"
    anchor="varydefault">
    <t>In a first step, a node can compare the
amount of northbound bandwidth available to neighbors at the same level
and modify
metric on its advertised default route (or even other routes)
to present a different distance leading to e.g. weighted
    ECMP forwarding on leafs. We call such a distance Bandwidth Adjusted Distance
    or BAD. This is best illustrated by a simple example.
    </t>

<t>
    <figure align="center" anchor="pic-default-modify"
    title="Balancing Bandwidth">
    <artwork align="center"><![CDATA[
.    |   x              |   |
.    |   x              |   |
.  +-+---+-+          +-+---+-+
.  |       |          |       |
.  |Node111|          |Node112|
.  +-+---+++          ++----+++
.    |x  ||           ||    ||
.    ||  |+---------------+ ||
.    ||  +---------------+| ||
.    ||               || || ||
.    ||  +------------+| || ||
.    ||  |+------------+ || ||
.    |x  ||              || ||
.  +-+---+++          +--++-+++
.  |       |          |       |
.  |Leaf111|          |Leaf112|
.  +-------+          +-------+

    ]]>
    </artwork>
</figure>
</t>

<!-- 
 More precisely, a node determines all other nodes at the same level
 N_s using the same algorithm as Section 4.2.3.8 while ignoring
 overloaded nodes and computes its own available bandwidth B_s as sum
 of bandwidth on links to its northern neighbors.  In the same fashion
 minimum and maximum available northbound bandwidth for any node in
 N_s is determined as B_min and B_max.  Each node MUST then remap its
 own bandwidth B_s as compared to the range [B_min, B_max] into the
 default route distance in range [`normalized_bw_metric_max`,
 `normalized_bw_metric_min`] in a linear fashion, i.e.  node with
 B_max will advertise default route with `normalized_bw_metric_min`
 and the one with B_min a default route with
 `normalized_bw_metric_max`. In case B_min and B_max cannot be
 determined (e.g.  none of the nodes have any northbound metric), the
 node MUST use `normalized_bw_metric_min`.  In case where other nodes
 have northbound links but the node itself has none it MUST use
 `normalized_bw_metric_max` which amounts to trying to deflect most of
 the northbound traffic to those nodes.

 The range [`normalized_bw_metric_max`, `normalized_bw_metric_min`]
 leaves intentionally enough space to allow for local configuration
 that forces either a lower or higher distance than any automatically
 computed BAD.
 -->

<t>All links in <xref target="pic-default-modify"/> are assumed to have the same
    bandwidth for simplicity. Node 111 sees in the node S-TIE of 112 that Node 112
    has twice the amount of bandwidth going northbound and therefore Node 111 will
    advertise its default route cost (BAD) as twice the default which without
    further failures would
    lead to Leaf 111 and Leaf 112 distributing 1/3 of the traffic to Node 111
    and 2/3 to Node 112.
    </t>

<t>Further, in <xref target="pic-default-modify"/> we assume that Leaf111 lost
    one of the parallel links to Node 111 and with that wants to push more traffic
    onto Node 112. This leads to local modification of the received BADs and each
    node can choose the ratio here independently based on understanding of e.g. traffic
    distribution between E-W and N-S or queue occupancy. If we assume that 50% of the leaf's traffic is
    for Leaf112 and 50% exits northbound we would modify the BADs accordingly
    to the bandwidth available towards each of them and end in Leaf 111 with a
    weight of 1 to Node 111 and weight of 4 to Node 112 which gives us roughly
    4/5 of the traffic going to Node 112.
    </t>

<t>Future version of this document will provide the precise algorithm to compute
    BADs from all other nodes at the same level
    using the same algorithm as <xref target="reduce"/> while ignoring
    overloaded nodes.</t>

<t>Observe that since BAD is only computed for default routes any disaggregated
    prefixes or PGP are not affected. </t>
<t>Observe further that a change in available bandwidth will only affect one
    level down in the fabric, i.e. blast radius of bandwidth changes is contained.</t>

</section>

<section title="Southbound Direction"
    anchor="varysouthbandwidth">

    <t>Due to its loop free properties a node could take during S-SPF into account
        the available bandwidth on the nodes in lower layers and modify the amount of traffic
        offered to next level's "southbound" nodes based on what it sees is the total achievable
        maximum flow through those nodes. It is worth observing that such
        computations will work better if standardized but does not have to be
        necessarily. As long as the packet keeps on heading south it
        will take one of the available paths and arrive at the intended destination.
    </t>
    <t>Future versions of this document will fill in more details.
        </t>
</section>
</section>

<section title="Segment Routing Support with RIFT" anchor="srsupport">

<t>Recently, alternative architecture to reuse labels as segment identifiers
    <xref target="I-D.ietf-spring-segment-routing"/>
    has gained traction and may present use cases in DC fabric that
    would justify its deployment. Such use cases will either precondition an assignment
    of a label per node (or other entities where the mechanisms are equivalent) or
    a global assignment and a knowledge of topology everywhere to compute
    segment stacks of interest. We deal with the two issues separately.

</t>

<section title="Global Segment Identifiers Assignment" anchor="sidsupport">

<t>Global segment identifiers are normally assumed to be provided by some kind of
    a centralized "controller" instance and distributed to other entities. This can
    be performed in RIFT by attaching a controller to the superspine nodes at
    the top of the fabric where the whole topology is always visible, assign such
    identifiers
    and then distribute those via the KV mechanism towards all nodes so they can
    perform things like probing the fabric for failures using a stack of segments.

    </t>
</section>

<section title="Distribution of Topology Information" anchor="sidfull">

    <t>Some segment routing use cases seem to precondition full knowledge of fabric topology
    in all nodes which can be performed albeit at the loss of one of highly
    desirable properties of RIFT, namely minimal blast radius. Basically,
    RIFT can function as a flat IGP by switching off its flooding scopes.
    All nodes
    will end up with full topology view and albeit the N-SPF and S-SPF are
    still performed based on RIFT rules, any computation with segment
    identifiers that needs full topology can use it. </t>
    <t>Beside blast radius problem, excessive flooding may present
        significant load on implementations. RIFT can be extended beside
        the mechanism in <xref target="reduce"/> to
        provide an algorithm for globally optimized flooding minimalization
        should demand for such a use case solidify.
    </t>
</section>

    </section>



<section title="Leaf to Leaf Procedures" anchor="leaf2leaf">
    <t>RIFT can optionally allow special leaf East-West adjacencies under
        additional set of rules. The leaf supporting those
        procedures MUST:

    </t>

<t>
    <list>
        <t>advertise the
            LEAF_2_LEAF flag in node capabilities AND</t>
        <t>
            set the overload bit on all leaf's node TIEs AND</t>
        <t>flood only node's own north and south TIEs over E-W leaf adjacencies AND </t>
        <t>always use E-W leaf adjacency  in both north as well as south computation AND</t>
        <t>install a discard route for any advertised aggregate in leaf's TIEs AND</t>
        <t>never form southbound adjacencies.</t>
        </list>
    </t>

<t>This will allow the E-W leaf nodes to exchange traffic strictly for the prefixes
advertised in each other's north prefix TIEs (since the southbound computation will
    find the reverse direction in the other node's TIE and install its north prefixes).
</t>

</section><!-- leaf-to-leaf -->

<section title="Other End-to-End Services">
    <t>Losing full, flat topology information at every node
        will have an
        impact on some of the
        end-to-end network services. This is the price paid for
        minimal disturbance in case
        of failures and reduced flooding and memory requirements on
        nodes lower south in the
        level hierarchy.
    </t>
</section>

<section title="Address Family and Multi Topology Considerations">
    <t>Multi-Topology (MT)<xref target="RFC5120"></xref>
        and Multi-Instance (MI)<xref target="RFC6822"></xref>
        is used today in link-state routing protocols to
        support several domains on the same
        physical topology. RIFT supports this capability by
        carrying transport ports in the LIE protocol
        exchanges.  Multiplexing of LIEs can be achieved by
        either choosing varying multicast addresses or ports
        on the same address.
    </t>

    <t>BFD interactions in <xref target="bfd"/>
        are implementation dependent when multiple RIFT instances run on the
    same link.</t>
</section>

<section title="Reachability of Internal Nodes in the Fabric" anchor="onastick">
    <t>
        RIFT does not precondition that its nodes have reachable addresses albeit
        for operational purposes this is clearly desirable. Under normal operating
        conditions this can be easily achieved by e.g. injecting the node's loopback
        address into North Prefix TIEs.

    </t>

    <t>
        Things get more interesting in case a node loses all its northbound
        adjacencies but is not at
        the top of the fabric. In such a case a node that detects that some other members
        at its level
        are advertising northbound adjacencies MAY inject its loopback address
        into southbound PGP TIE and become reachable "from the south" that way.
        Further, a solution may be implemented where based on e.g. a "well known"
        community such a southbound PGP is reflected at level 0 and advertised
        as northbound PGP again to allow for "reachability from the north" at the
        cost of additional flooding.
    </t>
</section>


<section title="One-Hop Healing of Levels with East-West Links" anchor="healing">

    <t>
        Based on the rules defined in <xref target="calculate"/>, <xref target="defaultrouterules"/> and
        given presence of E-W links, RIFT can provide a one-hop protection of nodes that lost
        all their northbound links or in other complex link set failure scenarios.
        <xref target="onastickexample"/> explains
        the resulting behavior based on one such example.
    </t>

</section>

</section>
                </section>

<section title="Examples">

        <section title="Normal Operation">


            <t>This section describes RIFT deployment in the example topology
                without any node or link failures. We disregard flooding
                reduction for simplicity's sake.
                </t>

<t>As first step, the following bi-directional adjacencies will be created
    (and any other links that do not fulfill LIE rules in <xref target="LIE"></xref>
     disregarded):


    <list style="numbers">
        <t>Spine 21 (PoD 0) to Node 111, Node 112, Node 121, and Node 122</t>

        <t>Spine 22 (PoD 0) to Node 111, Node 112, Node 121, and Node 122</t>

        <t>Node 111 to Leaf 111, Leaf 112</t>

        <t>Node 112 to Leaf 111, Leaf 112</t>

        <t>Node 121 to Leaf 121, Leaf 122</t>
        
        <t>Node 122 to Leaf 121, Leaf 122</t>

        </list>
                </t>

<t>Consequently, N-TIEs would be originated by Node 111 and Node 112 and
    each set would be sent to both Spine 21 and Spine 22.
    N-TIEs also would be originated by Leaf 111 (w/ Prefix 111) and Leaf 112
    (w/ Prefix 112 and the multi-homed prefix)
    and each set would be sent to Node 111 and Node 112.
    Node 111 and Node 112 would then flood these N-TIEs to Spine 21
    and Spine 22.
    </t>

<t>
    Similarly, N-TIEs would be originated by Node 121 and Node 122 and
    each set would be sent to both Spine 21 and Spine 22.
    N-TIEs also would be originated by Leaf 121 (w/ Prefix 121 and the
    multi-homed prefix) and Leaf 122
    (w/ Prefix 122) and each set would be sent to Node 121 and Node 122.
    Node 121 and Node 122 would then flood these N-TIEs to Spine 21
    and Spine 22.
    </t>

<t>At this point both Spine 21 and Spine 22, as well as any controller to
    which they are connected, would have the complete network topology.
    At the same time, Node 111/112/121/122 hold only the N-TIEs of
    level 0 of their respective PoD. Leafs hold only their own N-TIEs.
    </t>

<t>S-TIEs with adjacencies and
    a default IP prefix would then be originated by Spine 21 and
    Spine 22 and each would be flooded to Node 111, Node 112, Node 121, and
    Node 122.  Node 111, Node 112, Node 121, and Node 122 would each
    send the S-TIE from Spine 21 to Spine 22 and the S-TIE from Spine 22 to
    Spine 21.  (S-TIEs are reflected up to level from which they are received
    but they are NOT propagated southbound.)
    </t>

<t>An S-TIE with a default IP prefix would be originated by Node
    111 and Node 112 and each would be sent to Leaf 111 and Leaf 112.
    Leaf 111 and Leaf 112 would each send the S-TIE from Node 111 to
    Node 112 and the S-TIE from Node 112 to Node 111.
</t>

<t>Similarly, an S-TIE with a default IP prefix would be originated by Node
    121 and Node 122 and each would be sent to Leaf 121 and Leaf 122.
    Leaf 121 and Leaf 122 would each send the S-TIE from Node 121 to Node 122
    and the S-TIE from Node 122 to Node 121.

    At this point IP connectivity with maximum possible ECMP has been
    established between the leafs while constraining the amount of
    information held by each node
    to the minimum necessary for normal operation and dealing with failures.
    </t>


                   </section>

        <section title="Leaf Link Failure">

            <t>

                <figure align="center" anchor="pic-one-link-fail"
                    title="Single Leaf link failure">
                    <artwork align="center"><![CDATA[
.  |   |              |   |
.+-+---+-+          +-+---+-+
.|       |          |       |
.|Node111|          |Node112|
.+-+---+-+          ++----+-+
.  |   |             |    |
.  |   +---------------+  X
.  |                 | |  X Failure
.  |   +-------------+ |  X
.  |   |               |  |
.+-+---+-+          +--+--+-+
.|       |          |       |
.|Leaf111|          |Leaf112|
.+-------+          +-------+
.      +                  +
.     Prefix111     Prefix112
                    ]]>
                    </artwork>
                </figure>
            </t>


            <t>In case of a failing leaf link between node 112 and leaf 112
                the link-state
                information will cause re-computation of the necessary SPF
                and the higher levels will
                stop forwarding towards prefix 112 through node 112. Only
                nodes 111 and 112, as well
                as both spines will see control traffic. Leaf 111 will
                receive a new S-TIE
                from node 112 and reflect back to node 111.
                <!--
                 The link state information allows for maximum
                 convergence speed on failures and could be used to
                 provide sophisticated load balancing based on the available ECMP degree
                 in lower levels. Imagine Spine21 sending a packet south to Leaf112 whereas
                 the link Node112->Leaf112 failed. To ensure saturation of the remaining
                 three links south, it could divide the traffic amongst Node112 and
                 Node111 in ratio 1:2.</t>
                 -->

                Node 111 will de-aggregate prefix 111 and prefix 112 but
                we will not describe it further here
                since de-aggregation is emphasized
                in the next example. It is worth observing
                however
                in this example that if leaf 111 would keep on forwarding traffic towards
                prefix 112 using the advertised south-bound default of node 112
                the traffic would end up on spine 21 and spine 22 and cross back
                into pod 1 using node 111. This is arguably
                not as bad as black-holing
                present in the next example but clearly undesirable.
                Fortunately, de-aggregation prevents this type of behavior except
                for a transitory period of time.

            </t>
        </section>

        <section title="Partitioned Fabric" anchor="fabriccut">
            <t>

                <figure align="center" anchor="pic-part-fabric" title="Fabric partition">
                    <artwork align="center"><![CDATA[
.                +--------+          +--------+   S-TIE of Spine21
.                |        |          |        |   received by
.                |Spine 21|          |Spine 22|   reflection of
.                ++-+--+-++          ++-+--+-++   Nodes 112 and 111
.                 | |  | |            | |  | |
.                 | |  | |            | |  | 0/0
.                 | |  | |            | |  | |
.                 | |  | |            | |  | |
.  +--------------+ |  +--- XXXXXX +  | |  | +---------------+
.  |                |    |         |  | |  |                 |
.  |    +-----------------------------+ |  |                 |
.  0/0  |           |    |         |    |  |                 |
.  |    0/0       0/0    +- XXXXXXXXXXXXXXXXXXXXXXXXX -+     |
.  |  1.1/16        |              |    |  |           |     |
.  |    |           +-+    +-0/0-----------+           |     |
.  |    |             |   1.1./16  |    |              |     |
.+-+----++          +-+-----+     ++-----0/0          ++----0/0
.|       |          |       |     |    1.1/16         |   1.1/16
.|Node111|          |Node112|     |Node121|           |Node122|
.+-+---+-+          ++----+-+     +-+---+-+           ++---+--+
.  |   |             |    |         |   |              |   |
.  |   +---------------+  |         |   +----------------+ |
.  |                 | |  |         |                  | | |
.  |   +-------------+ |  |         |   +--------------+ | |
.  |   |               |  |         |   |                | |
.+-+---+-+          +--+--+-+     +-+---+-+          +---+-+-+
.|       |          |       |     |       |          |       |
.|Leaf111|          |Leaf112|     |Leaf121|          |Leaf122|
.+-+-----+          ++------+     +-----+-+          +-+-----+
.  +                 +                  +              +
.  Prefix111    Prefix112             Prefix121     Prefix122
.                                       1.1/16
                    ]]>
                    </artwork>
                </figure>
            </t>

            <t>

                <xref target="pic-part-fabric"></xref> shows the arguably most
                catastrophic but also the most interesting case. Spine 21 is
                completely severed from access to Prefix 121 (we use in the figure
                1.1/16 as example) by double link failure.
                However unlikely, if left
                unresolved, forwarding from leaf 111 and leaf 112 to prefix 121 would
                suffer 50% black-holing based on pure default route
                advertisements by spine 21
                and spine 22.
            </t>


            <t>
                The mechanism used to resolve this scenario is hinging on the
                distribution of southbound representation by spine 21 that is
                reflected by node 111 and node 112 to spine 22. Spine 22,
                having computed reachability to all prefixes in the network,
                advertises with the default route
                the ones that are reachable only via lower level
                neighbors that spine 21 does not show an adjacency to. That
                results
                in node 111 and node 112 obtaining a longest-prefix match
                to prefix 121 which leads through spine 22 and prevents black-holing
                through spine 21 still advertising the 0/0 aggregate only.
            </t>

            <t>The prefix 121 advertised by spine 22 does not have
                to be propagated further towards leafs since they do
                not benefit from this information. Hence the amount of flooding is
                restricted to spine 21 reissuing its S-TIEs
                and reflection of those by node 111 and node 112. The resulting
                SPF in spine 22 issues a new prefix S-TIE containing 1.1/16. None of
                the leafs become aware of the changes and the failure is
                constrained strictly to the level that became partitioned.

            </t>

            <t>To finish with an example of the resulting sets computed using notation
                    introduced in <xref target="dissagregate"/>,
                    spine 22 constructs the following sets:
                    </t>
<t>
                <list>
                    <t>|R = Prefix 111, Prefix 112, Prefix 121, Prefix 122</t>

                    <t>|H (for r=Prefix 111) = Node 111, Node 112</t>

                    <t>|H (for r=Prefix 112) = Node 111, Node 112</t>

                   <t>|H (for r=Prefix 121) = Node 121, Node 122</t>

                    <t>|H (for r=Prefix 122) = Node 121, Node 122</t>

                    <t>|A (for Spine 21) = Node 111, Node 112</t>
</list>
</t>
                <t>With that and |H (for r=prefix 121) and |H (for r=prefix 122)
                    being disjoint from |A (for spine 21), spine 22 will
                    originate an S-TIE with prefix 121 and prefix 122,
                    that is flooded to nodes 111, 112, 121 and 122.
                </t>

        </section>

        <section title="Northbound Partitioned Router and Optional East-West Links"
            anchor="onastickexample">



            <t>
                </t>
            <t>
            <figure align="center" anchor="north-part-node"
                title="North Partitioned Router">

                <artwork align="center"><![CDATA[
.   +                  +                  +
.   X N1               | N2               | N3
.   X                  |                  |
.+--+----+          +--+----+          +--+-----+
.|       |0/0>  <0/0|       |0/0>  <0/0|        |
.|  A01  +----------+  A02  +----------+  A03   | Level 1
.++-+-+--+          ++--+--++          +---+-+-++
. | | |              |  |  |               | | |
. | | +----------------------------------+ | | |
. | |                |  |  |             | | | |
. | +-------------+  |  |  |  +--------------+ |
. |               |  |  |  |  |          | |   |
. | +----------------+  |  +-----------------+ |
. | |             |     |     |          | | | |
. | | +------------------------------------+ | |
. | | |           |     |     |          |   | |
.++-+-+--+        | +---+---+ |        +-+---+-++
.|       |        +-+       +-+        |        |
.|  L01  |          |  L02  |          |  L03   | Level 0
.+-------+          +-------+          +--------+
                ]]>
                </artwork>
            </figure>
</t>

            <t>
                <xref target="north-part-node"/> shows a part of a fabric where
                level 1 is horizontally connected and A01 lost its only northbound
                adjacency. Based on N-SPF rules in <xref target="nspf"/> A01 will
                compute northbound reachability by using the link A01 to A02 (whereas
                A02 will NOT use this link during N-SPF). Hence A01 will still
                advertise the default towards level 0 and route unidirectionally
                using the horizontal
                link. Moreover, based on <xref target="onastick"/>
                it may advertise its loopback address as south PGP to remain
                reachable "from the south" for operational purposes. This is
                necessary since A02 will NOT route towards A01 using the E-W
                link (doing otherwise may form routing loops).
</t>

<t>
                As further consideration, the moment A02 loses link N2 the situation
                evolves again. A01 will have no more northbound reachability while
                still seeing A03 advertising northbound adjacencies in its
                South Node TIE. With that it will stop advertising a default
                route due to <xref target="defaultrouterules"/>.
                Moreover, A02 may now inject its
                loopback address as south PGP.
                </t>

            </section>

</section>


        <section title="Implementation and Operation: Further Details">

<section title="Considerations for Leaf-Only Implementation">

<t>Ideally RIFT can be stretched out to the lowest level in the IP fabric to
    integrate ToRs or even servers. Since those entities would run as leafs
    only, it is worth observing that a leaf-only version is significantly
    simpler to implement and requires much less resources:
    </t>

<t>
    <list style="numbers">
        <t>Under normal conditions, the leaf needs to support a multipath
        default route only. In worst partitioning
        case it has to be capable of accommodating
        all the leaf routes in its own POD to prevent black-holing.</t>
        <t>Leaf nodes hold only their own N-TIEs and S-TIEs of Level 1 nodes
            they are connected to; so overall few in numbers.</t>
    <t>Leaf node does not have to support flooding reduction and de-aggregation.</t>
    <t>Unless optional leaf-2-leaf procedures are desired,
        default route origination and S-TIE origination are
    unnecessary.</t>
    </list>
    </t>


    </section>

<section title="Adaptations to Other Proposed Data Center Topologies">

<t>
    <figure align="center" anchor="levelshortcuts" title="Level Shortcut">

        <artwork align="center"><![CDATA[
.  +-----+        +-----+
.  |     |        |     |
.+-+ S0  |        | S1  |
.| ++---++        ++---++
.|  |   |          |   |
.|  | +------------+   |
.|  | | +------------+ |
.|  | |              | |
.| ++-+--+        +--+-++
.| |     |        |     |
.| | A0  |        | A1  |
.| +-+--++        ++---++
.|   |  |          |   |
.|   |  +------------+ |
.|   | +-----------+ | |
.|   | |             | |
.| +-+-+-+        +--+-++
.+-+     |        |     |
.  | L0  |        | L1  |
.  +-----+        +-----+
        ]]>
        </artwork>
    </figure>
</t>


    <t>
        Strictly speaking, RIFT is not limited to Clos variations only. The protocol
        preconditions only a sense of 'compass rose direction' achieved by
        configuration (or derivation) of levels and other topologies are possible within this
        framework. So, conceptually,
    one could include leaf to leaf links and even shortcut between layers but
    certain requirements in <xref target="reqs"/> will not be met anymore.
    As an example, shortcutting
    levels illustrated in <xref target="levelshortcuts"/> will lead either to suboptimal
    routing when L0 sends traffic to L1 (since using S0's default route will lead to
    the traffic being sent back to A0 or A1) or the leafs need each other's routes
    installed to understand that only A0 and A1 should be used to talk to each other.
    </t>

    <t>Whether such modifications of topology constraints make sense is dependent on many technology
        variables and the exhaustive treatment of the topic is definitely outside the scope of
        this document.
        </t>

</section>

<section title="Originating Non-Default Route Southbound">

<t>Obviously, an implementation may choose to originate southbound instead of a strict
    default route (as described in <xref target="defaultrouterules"/>) a shorter
    prefix P' but in such a scenario
    all addresses carried within the RIFT domain must be contained within P'.</t>

    </section>

    </section>

        <section title="Security Considerations" anchor="security">
            <t>The protocol has provisions for nonces and can include authentication
                mechanisms in the future comparable to <xref target="RFC5709"/> and
            <xref target="RFC7987"/>.</t>
                    <t>One can consider additionally attack vectors where a router
                        may reboot many times while changing its system ID and pollute
                        the network with many stale TIEs or TIEs are sent with very long
                        lifetimes and not cleaned up when the routes vanish.
Those attack vectors are not unique to RIFT.
                        Given large memory footprints
                        available today those attacks should be relatively benign.  Otherwise
                        a node can implement a strategy of e.g. discarding contents of all TIEs
                        of nodes that were not present in the SPF tree over a certain
                        period of time. Since the protocol, like all modern link-state
                        protocols, is self-stabilizing and will advertise the presence
                        of such TIEs to its neighbors, they can be
                        re-requested again if a computation finds that it sees an
                        adjacency formed towards the system ID of the discarded
                        TIEs.
                </t>
<t><xref target="ZTP"/> presents many attack vectors in untrusted environments, starting
    with nodes that oscillate their level offers to the possibility of a node offering
    a three way adjacency with the highest possible level value with a very long holdtime
    trying to put itself "on top of the lattice" and with
    that gaining access to the whole southbound topology.
    Session authentication mechanisms
    are necessary in
    environments where this is possible.
    </t>
            </section>



        <section title="Information Elements Schema" anchor="schema">

            <t>This section introduces the schema for information elements.</t>
            <t>On schema changes that:</t>
                <t>
                    <list style="numbers">
                        <t>change  field numbers or</t>
                        <t>add new required fields or</t>
                        <t>remove  fields or</t>
                        <t>change  lists into sets, unions into structures or</t>
                        <t>change  multiplicity of fields or</t>
                        <t>change  name of any field or</t>
                        <t>change  datatypes of any field or</t>
                        <t>change  default value of any field</t>
                        </list>
                    </t>
                <t>major version of the schema MUST increase.
                    All other changes MUST
                increase minor version within the same major.</t>

<t>Thrift serializer/deserializer MUST NOT discard optional, unknown
    fields but
    preserve and
    serialize them again when re-flooding whereas missing optional fields
    MAY be
    replaced
    with according default values if present.
    </t>

<t>All signed integers as forced by Thrift support must be cast for internal
    purposes to equivalent unsigned values without discarding the signedness bit.
    An implementation SHOULD try to avoid using the signedness bit when
    generating values.</t>

<t>The schema is normative.</t>


<section title="common.thrift">
    <t>
<figure><artwork><![CDATA[

/**
    Thrift file with common definitions for RIFT
*/

namespace * common

/** @note MUST be interpreted in implementation as unsigned 64 bits.
 *        The implementation SHOULD NOT use the MSB.
 */
typedef i64    SystemIDType
/** IPv4 address carried in 32 bits */
typedef i32    IPv4Address
/** this has to be of length long enough to accommodate prefix */
typedef binary IPv6Address
/** @note MUST be interpreted in implementation as unsigned 16 bits */
typedef i16    UDPPortType
/** @note MUST be interpreted in implementation as unsigned 32 bits */
typedef i32    TIENrType
/** @note MUST be interpreted in implementation as unsigned 32 bits */
typedef i32    MTUSizeType
/** @note MUST be interpreted in implementation as unsigned 32 bits */
typedef i32    SeqNrType
/** @note MUST be interpreted in implementation as unsigned 32 bits */
typedef i32    LifeTimeInSecType
/** @note MUST be interpreted in implementation as unsigned 16 bits */
typedef i16    LevelType
/** @note MUST be interpreted in implementation as unsigned 32 bits */
typedef i32    PodType
/** @note MUST be interpreted in implementation as unsigned 16 bits */
typedef i16    VersionType
/** @note MUST be interpreted in implementation as unsigned 32 bits */
typedef i32    MetricType
/** @note MUST be interpreted in implementation as unsigned 32 bits */
typedef i32    BandwithInMegaBitsType
typedef string KeyIDType
/** node local, unique identification for a link (interface/tunnel
  * etc. Basically anything RIFT runs on). This is kept
  * at 32 bits so it aligns with BFD [RFC5880] discriminator size.
  */
typedef i32    LinkIDType
typedef string KeyNameType
/** length of an IP prefix in bits */
typedef i8     PrefixLenType
/** timestamp in seconds since the epoch */
typedef i64    TimestampInSecsType
/** security nonce */
typedef i64    NonceType
/** adjacency holdtime */
typedef i16    HoldTimeInSecType

/** Flags indicating node's behavior in case of ZTP and support
    for special optimization procedures. It will force level to `leaf_level`
 */
enum LeafIndications {
    leaf_only                            =0,
    leaf_only_and_leaf_2_leaf_procedures =1,
}

/** default bandwidth on a link */
const BandwithInMegaBitsType  default_bandwidth    = 10
/** fixed leaf level when ZTP is not used */
const LevelType   leaf_level              = 0
const LevelType   default_level           = leaf_level
/** This MUST be used when node is configured as superspine in ZTP.
    This is kept reasonably low to allow for fast ZTP convergence on
    failures. */
const LevelType   default_superspine_level = 24
const PodType     default_pod              = 0
const LinkIDType  undefined_linkid         = 0
/** default distance used */
const MetricType  default_distance         = 1
/** any distance larger than this will be considered infinity */
const MetricType  infinite_distance       = 0x70000000
/** any element with 0 distance will be ignored,
 *  missing metrics will be replaced with default_distance
 */
const MetricType  invalid_distance        = 0
const bool overload_default               = false
const bool flood_reduction_default        = true
const HoldTimeInSecType default_holdtime  = 3
/** by default LIE levels are ZTP offers */
const bool default_not_a_ztp_offer        = false
/** 0 is illegal for SystemID */
const SystemIDType IllegalSystemID        = 0
/** empty set of nodes */
const set<SystemIDType> empty_set_of_nodeids = {}

/** normalized bandwidth metric maximum, i.e. node with lowest northbound bandwidth
 *  at its level uses this metric to advertise its default route */
const MetricType normalized_bw_metric_max = 0x1fff
/** normalized bandwidth metric minimum, i.e. node with highest northbound bandwidth
 *  at its level uses this metric to advertise its default route */
const MetricType normalized_bw_metric_min = 0x00ff

/** default UDP port to run LIEs on */
const UDPPortType     default_lie_udp_port       =  6949
/** default UDP port to receive flooded TIEs on */
const UDPPortType     default_tie_udp_flood_port =  6950

/** default MTU size to use */
const MTUSizeType     default_mtu_size           =  1400
/** default mcast is v4 224.0.1.150, we make it i64 to
 *  help languages struggling with highest bit */
const i64             default_lie_v4_mcast_group =  3758096790

/** indicates whether the direction is northbound/east-west
  * or southbound */
enum TieDirectionType {
    Illegal           = 0,
    South             = 1,
    North             = 2,
    DirectionMaxValue = 3,
}

/** address family of an address or prefix */
enum AddressFamilyType {
   Illegal                = 0,
   AddressFamilyMinValue  = 1,
   IPv4     = 2,
   IPv6     = 3,
   AddressFamilyMaxValue  = 4,
}

/** IPv4 prefix consisting of address and prefix length */
struct IPv4PrefixType {
    1: required IPv4Address    address;
    2: required PrefixLenType  prefixlen;
}

/** IPv6 prefix consisting of address and prefix length */
struct IPv6PrefixType {
    1: required IPv6Address    address;
    2: required PrefixLenType  prefixlen;
}

/** an IP address, either v4 or v6 */
union IPAddressType {
    1: optional IPv4Address   ipv4address;
    2: optional IPv6Address   ipv6address;
}

/** an IP prefix, either v4 or v6 */
union IPPrefixType {
    1: optional IPv4PrefixType   ipv4prefix;
    2: optional IPv6PrefixType   ipv6prefix;
}

/** type of a TIE */
enum TIETypeType {
    Illegal                 = 0,
    TIETypeMinValue         = 1,
    /** first legal value */
    NodeTIEType             = 2,
    PrefixTIEType           = 3,
    TransitivePrefixTIEType = 4,
    PGPrefixTIEType         = 5,
    KeyValueTIEType         = 6,
    TIETypeMaxValue         = 7,
}

/** @note: route types which MUST be ordered on their preference
 *  PGP prefixes are most preferred attracting
 *  traffic north (towards spine) and then south
 *  normal prefixes are attracting traffic south (towards leafs),
 *  i.e. prefix in NORTH PREFIX TIE is preferred over SOUTH PREFIX TIE
 */
enum RouteType {
    Illegal               = 0,
    RouteTypeMinValue     = 1,
    /** First legal value. */
    /** Discard routes are most preferred */
    Discard               = 2,

    /** Local prefixes are directly attached prefixes on the
     *  system such as e.g. interface routes.
     */
    LocalPrefix           = 3,
    /** advertised in S-TIEs */
    SouthPGPPrefix        = 4,
    /** advertised in N-TIEs */
    NorthPGPPrefix        = 5,
    /** advertised in N-TIEs */
    NorthPrefix           = 6,
    /** advertised in S-TIEs */
    SouthPrefix           = 7,
    /** transitive southbound are least preferred */
    TransitiveSouthPrefix = 8,
    RouteTypeMaxValue     = 9
}


]]></artwork></figure>
</t>

</section>

<section title="encoding.thrift">
    <t>
    <figure><artwork><![CDATA[

/**
    Thrift file for packet encodings for RIFT
*/

/** common types and constants shared across the RIFT schema */
include "common.thrift"

/** represents protocol encoding schema major version */
const i32 protocol_major_version = 8
/** represents protocol encoding schema minor version */
const i32 protocol_minor_version = 0

/** common RIFT packet header carried on every packet */
struct PacketHeader {
    1: required common.VersionType major_version = protocol_major_version;
    2: required common.VersionType minor_version = protocol_minor_version;
    /** this is the node sending the packet, in case of LIE/TIRE/TIDE
        also the originator of it */
    3: required common.SystemIDType  sender;
    /** level of the node sending the packet, required on everything except
      * LIEs. Lack of presence on LIEs indicates UNDEFINED_LEVEL and is used
      * in ZTP procedures.
     */
    4: optional common.LevelType     level;
}

/** Community value for policy-guided prefix (PGP) purposes,
    carried as two 32-bit halves */
struct Community {
    1: required i32          top;
    2: required i32          bottom;
}

/** Neighbor as seen on a link; reflected in LIEs to achieve
    3-way connectivity */
struct Neighbor {
    1: required common.SystemIDType        originator;
    2: required common.LinkIDType          remote_id;
}

/** Capabilities the node supports */
struct NodeCapabilities {
    /** can this node participate in flood reduction,
        only relevant at level > 0 */
    1: optional bool                      flood_reduction =
            common.flood_reduction_default;
    /** does this node restrict itself to be leaf only (in ZTP) and
        does it support leaf-2-leaf procedures */
    2: optional common.LeafIndications    leaf_indications;
}

/** RIFT LIE packet

    @note this node's level is already included on the packet header */
struct LIEPacket {
    /** optional node or adjacency name */
    1: optional string                    name;
    /** local link ID */
    2: required common.LinkIDType         local_id;
    /** UDP port to which we can receive flooded TIEs */
    3: required common.UDPPortType        flood_port =
            common.default_tie_udp_flood_port;
    /** layer 3 MTU */
    4: optional common.MTUSizeType        link_mtu_size =
            common.default_mtu_size;
    /** this will reflect the neighbor once received to provide
        3-way connectivity */
    5: optional Neighbor                  neighbor;
    /** PoD this node belongs to */
    6: optional common.PodType            pod = common.default_pod;
    /** optional nonce used for security computations */
    7: optional common.NonceType          nonce;
    /** optional node capabilities shown in the LIE. The capabilities
        MUST match the capabilities shown in the Node TIEs, otherwise
        the behavior is unspecified. A node detecting the mismatch
        SHOULD generate according error.
     */
    8: optional NodeCapabilities          capabilities;
    /** required holdtime of the adjacency, i.e. how much time
        MUST expire without LIE for the adjacency to drop
     */
    9: required common.HoldTimeInSecType  holdtime =
            common.default_holdtime;
    /** indicates that the level on the LIE MUST NOT be used
        to derive a ZTP level by the receiving node. */
   10: optional bool                      not_a_ztp_offer =
            common.default_not_a_ztp_offer;
}

/** LinkID pair describes one of parallel links between two nodes */
struct LinkIDPair {
    /** node-wide unique value for the local link */
    1: required common.LinkIDType      local_id;
    /** received remote link ID for this link */
    2: required common.LinkIDType      remote_id;
    /** more properties of the link can go in here */
}

/** ID of a TIE

    @note: TIEID space is a total order achieved by comparing the elements
           in sequence defined and comparing each value as an
           unsigned integer of according length
*/
struct TIEID {
    /** indicates direction of the TIE */
    1: required common.TieDirectionType    direction;
    /** indicates originator of the TIE */
    2: required common.SystemIDType        originator;
    /** type of the TIE */
    3: required common.TIETypeType         tietype;
    /** number of the TIE within its type */
    4: required common.TIENrType           tie_nr;
}

/** Header of a TIE.

    NOTE(review): field IDs start at 2; field 1 appears unused —
    presumably retired/reserved, confirm before reusing. */
struct TIEHeader {
    2: required TIEID                      tieid;
    3: required common.SeqNrType           seq_nr;
    /** lifetime expires down to 0 just like in ISIS */
    4: required common.LifeTimeInSecType   lifetime;
}

/** A sorted TIDE packet, if unsorted, behavior is undefined */
struct TIDEPacket {
    /** all 00s mark start of the range */
    1: required TIEID           start_range;
    /** all FFs mark end of the range */
    2: required TIEID           end_range;
    /** _sorted_ list of headers */
    3: required list<TIEHeader> headers;
}

/** A TIRE packet */
struct TIREPacket {
    1: required set<TIEHeader> headers;
}

/** Neighbor of a node.

    NOTE(review): field IDs start at 2; field 1 appears unused —
    presumably retired/reserved, confirm before reusing. */
struct NodeNeighborsTIEElement {
    /** level of the neighbor */
    2: required common.LevelType       level;
    /**  Cost to neighbor.

         @note: All parallel links to same node
         incur same cost, in case the neighbor has multiple
         parallel links at different cost, the largest distance
         (highest numerical value) MUST be advertised
         @note: any neighbor with cost <= 0 MUST be ignored in computations */
    3: optional common.MetricType      cost = common.default_distance;
    /** can carry description of multiple parallel links in a TIE */
    4: optional set<LinkIDPair>        link_ids;

    /** total bandwidth to neighbor, this will be normally sum of the
     *   bandwidths of all the parallel links.
     **/
    5: optional common.BandwithInMegaBitsType   bandwidth =
            common.default_bandwidth;
}

/** Flags the node sets */
struct NodeFlags {
    /** node is in overload, do not transit traffic through it */
    1: optional bool         overload = common.overload_default;
}

/** Description of a node.

    It may occur multiple times in different TIEs but if either
        * capabilities values do not match or
        * flags values do not match or
        * neighbors repeat with different values or
        * visible in same level/having partition upper do not match
    the behavior is undefined and a warning SHOULD be generated.
    Neighbors can be distributed across multiple TIEs, however,
    only if the sets are disjoint.

    @note: observe that absence of fields implies defined defaults
*/
struct NodeTIEElement {
    /** level of the node */
    1: required common.LevelType            level;
    /** if neighbor systemID repeats in other node TIEs of same node
        the behavior is undefined. Equivalent to |A_(n,s)(N) in spec. */
    2: required map<common.SystemIDType,
                NodeNeighborsTIEElement>    neighbors;
    /** capabilities of the node; see the undefined-behavior caveat
        above when values differ across TIEs */
    3: optional NodeCapabilities            capabilities;
    /** flags of the node; see the undefined-behavior caveat
        above when values differ across TIEs */
    4: optional NodeFlags                   flags;
    /** optional node name for easier operations */
    5: optional string                      name;

    /** Nodes seen at the same level through reflection through nodes
        having backlink to both nodes. They are equivalent to |V(N) in
        future specifications. Ignored in Node S-TIEs if present.
      */
    6: optional set<common.SystemIDType>    visible_in_same_level
            = common.empty_set_of_nodeids;
    /** Non-overloaded nodes in |V seen as attached to another north
      * level partition due to the fact that some nodes in its |V have
      * adjacencies to higher level nodes that this node doesn't see.
      * This may be used in the computation at higher levels to prevent
      * blackholing. Ignored in Node S-TIEs if present.
      * Equivalent to |PUL(N) in spec. */
    7: optional set<common.SystemIDType>    same_level_unknown_north_partitions
            = common.empty_set_of_nodeids;
}

/** Attributes attached to a prefix.
    NOTE(review): field index 1 is unassigned, presumably
    reserved or deprecated — confirm against schema history. */
struct PrefixAttributes {
    /** Observe that in default metric case the node is supposed to advertise
      * metric calculated from comparison of bandwidths at all nodes at its
      * level. **/
    2: required common.MetricType       metric = common.default_distance;
}

/** multiple prefixes carried in a single TIE element */
struct PrefixTIEElement {
    /** prefixes with the associated attributes.
        if the same prefix repeats in multiple TIEs of same node
        behavior is unspecified */
    1: required map<common.IPPrefixType, PrefixAttributes> prefixes;
}

/** keys with their values.
    NOTE(review): the semantics of the string values appear opaque
    to the protocol itself — confirm against the specification text. */
struct KeyValueTIEElement {
    /** if the same key repeats in multiple TIEs of same node
        or with different values, behavior is unspecified */
    1: required map<common.KeyIDType,string>    keyvalues;
}

/** single element in a TIE. enum common.TIETypeType
    in TIEID indicates which elements MUST be present
    in the TIEElement. In case of mismatch the unexpected
    elements MUST be ignored.
 */
union TIEElement {
    /** in case of enum common.TIETypeType.NodeTIEType */
    1: optional NodeTIEElement            node;
    /** in case of enum common.TIETypeType.PrefixTIEType */
    2: optional PrefixTIEElement          prefixes;
    /** transitive prefixes (always southbound) which SHOULD be propagated
     *   southwards towards lower levels to heal
     *   pathological upper level partitioning, otherwise
     *   blackholes may occur. MUST NOT be advertised within a North TIE.
     */
    3: optional PrefixTIEElement          transitive_prefixes;
    /** key/value pairs; presumably in case of the key/value member of
        enum common.TIETypeType — exact member name not visible here,
        confirm against the common schema. */
    4: optional KeyValueTIEElement        keyvalues;
    /** @todo: policy guided prefixes */
}

/** A full TIE: header plus the carried element.
    @todo: flood header separately in UDP to allow caching to TIEs
           while changing lifetime?
 */
struct TIEPacket {
    /** header carrying TIEID, sequence number and lifetime */
    1: required TIEHeader  header;
    /** content matching the tietype carried in the header's TIEID */
    2: required TIEElement element;
}

/** content of a protocol packet; as a Thrift union, at most one
    of the fields is set */
union PacketContent {
    1: optional LIEPacket    lie;
    2: optional TIDEPacket   tide;
    3: optional TIREPacket   tire;
    4: optional TIEPacket    tie;
}

/** protocol packet structure */
struct ProtocolPacket {
    /** header common to all packet types */
    1: required PacketHeader  header;
    /** one of LIE/TIDE/TIRE/TIE */
    2: required PacketContent content;
}

    ]]></artwork></figure>
    </t>
</section>


            </section>

        <?rfc needLines="8" ?>


        <section anchor="IANA" title="IANA Considerations">

<t>This specification will, at an opportune time, request multiple registry
    points to exchange protocol packets in a standardized way, amongst them
    multicast address assignments and standard port numbers. The schema itself
    defines many values and codepoints which can be considered registries
    themselves.
    </t>
        </section>


        <section anchor="Acknowledgments" title="Acknowledgments">
            <t>Many thanks to Naiming Shen for some of the early
                discussions around
                the topic of using IGPs for routing in topologies related to Clos.
                Russ White is to be especially acknowledged for the key
                conversation on epistemology that allowed tying current
                asynchronous distributed systems theory results to the modern
                protocol design presented here.
                Adrian Farrel, Joel Halpern and Jeffrey Zhang
                provided thoughtful comments that improved the
                readability of the document and found a good number of
                corners where the light failed to shine. Kris Price was
                the first to mention single router, single arm default considerations.
                Jeff Tantsura helped out with some initial thoughts on BFD
                interactions while Jeff Haas corrected several misconceptions
                about BFD's finer points. Artur Makutunowicz pointed out
                many possible improvements and acted as sounding
                board in regard to modern protocol implementation techniques
                RIFT is exploring. Barak Gafni was the first to clearly
                formalize the problem of a partitioned spine, on a
                (clean) napkin in Singapore.
            </t>
        </section>


    </middle>


    <back>
        <!-- References split into informative and normative -->

        <!-- There are 2 ways to insert reference entries from the citation libraries:
         1. define an ENTITY at the top, and use "ampersand character"RFC2629; here (as shown)
         2. simply use a PI "less than character"?rfc include="reference.RFC.2119.xml"?> here
         (for I-Ds: include="reference.I-D.narten-iana-considerations-rfc2434bis.xml")

         Both are cited textually in the same manner: by using xref elements.
         If you use the PI option, xml2rfc will, by default, try to find included files in the same
         directory as the including file. You can also define the XML_LIBRARY environment variable
         with a value containing a set of directories to search.  These can be either in the local
         filing system or remote ones accessed by http (http://domain/dir/... ).-->

        <references title="Normative References">
            <!--?rfc include="http://xml.resource.org/public/rfc/bibxml/reference.RFC.2119.xml"?-->


            <reference anchor="ISO10589">
                <front>
                    <title> Intermediate system to Intermediate system
                        intra-domain
                        routeing information exchange protocol for use
                        in conjunction with
                        the protocol for providing the connectionless-mode
                        Network Service
                        (ISO 8473), ISO/IEC 10589:2002, Second Edition.</title>
                    
                    <author>
                        <organization>ISO &quot;International Organization for
                            Standardization&quot;</organization>
                    </author>
                    <date month="Nov" year="2002"/>
                </front>
            </reference>

&RFC1142;
&RFC2119;
&RFC2328;
&RFC2365;
&RFC4271;
&RFC4291;
&RFC4655;
&RFC5120;
&RFC5303;
&RFC5709;
&RFC5881;
&RFC6234;
&RFC6822;
&RFC7855;
&RFC7938;
&RFC7987;
        </references>

        <references title="Informative References">
            <!-- Here we use entities that we defined at the beginning. -->

        <?rfc include="reference.I-D.ietf-spring-segment-routing"?>

            <reference anchor="CLOS">
                <front>
                    <title>On Nonblocking Folded-Clos Networks in
                        Computer Communication Environments</title>
                    <author initials="X." surname="Yuan">
                        <organization>IEEE International Parallel &amp;
                            Distributed Processing Symposium</organization>
                    </author>
                    <date  year="2011"/>
                </front>
                <seriesInfo name="IEEE" value="International Parallel &amp; 
                Distributed Processing Symposium"/>
            </reference>

<reference anchor="DIJKSTRA">
    <front>
        <title>A Note on Two Problems in Connexion with Graphs</title>
        <author initials="E. W." surname="Dijkstra">
            <organization></organization>
        </author>

        <date  year="1959"/>
    </front>
    <seriesInfo name="Journal Numer. Math." value=""/>
</reference>

<reference anchor="DYNAMO">
    <front>
        <title>Dynamo: amazon's highly available key-value store</title>
        <author  initials="G." surname="De Candia et al.">
            <organization></organization>
        </author><date  year="2007"/>
    </front>
    <seriesInfo name="ACM" value="SIGOPS symposium on Operating systems principles (SOSP '07)"/>
</reference>

<reference anchor="EPPSTEIN">
    <front>
        <title>Finding the k-Shortest Paths</title>
        <author initials="D" surname="Eppstein">
            <organization>USC
            </organization>
        </author>

        <date  year="1997"/>
    </front>

</reference>

            <reference anchor="FATTREE">
                <front>
                    <title>Fat-Trees: Universal Networks for Hardware-Efficient
                        Supercomputing</title>
                    <author initials="C. E." surname="Leiserson">
                        <organization>IEEE Transactions on Computers</organization>
                    </author>
                    <date  year="1985"/>
                </front>
                
            </reference>
            
            <reference anchor="VAHDAT08">
                <front>
                    <title>A Scalable, Commodity Data Center Network
                        Architecture</title>
                    <author initials="M." surname="Al-Fares">
                        <organization>USC</organization>
                    </author>
                    
                    <author initials="A." surname="Loukissas">
                        <organization>USC</organization>
                    </author>
                    
                    <author initials="A." surname="Vahdat">
                        <organization>USC</organization>
                    </author>
                    <date  year="2008"/>
                </front>
                <seriesInfo name="SIGCOMM" value=""/>
            </reference>
            



<reference anchor="MAKSIC2013">
    <front>
        <title>Improving Utilization of Data Center Networks</title>
        <author  initials="N." surname="Maksic et al.">
            <organization></organization>
        </author><date  month="Nov" year="2013"/>
    </front>
    <seriesInfo name="IEEE" value="Communications Magazine"/>
</reference>


<reference anchor="QUIC">
    <front>
        <title>QUIC: A UDP-Based Multiplexed and Secure Transport</title>
        <author initials="J." surname="Iyengar et al.">
            <organization>IETF</organization></author>
        <date year="2016"/>
    </front>
</reference>

<reference anchor="PROTOBUF">
    <front>
        <title>Protocol Buffers,
            https://developers.google.com/protocol-buffers</title>
        <author>
            <organization>Google, Inc.</organization>
            </author>
        <date/>
    </front>
</reference>

        </references>

    </back>
</rfc>
