/******************************************************************************
 * netif.h
 *
 * Unified network-device I/O interface for Xen guest OSes.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 */
| 8 | |
| 9 | #ifndef __XEN_PUBLIC_IO_NETIF_H__ |
| 10 | #define __XEN_PUBLIC_IO_NETIF_H__ |
| 11 | |
David Howells | a1ce392 | 2012-10-02 18:01:25 +0100 | [diff] [blame] | 12 | #include <xen/interface/io/ring.h> |
| 13 | #include <xen/interface/grant_table.h> |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 14 | |
| 15 | /* |
Wei Liu | 2810e5b | 2013-04-22 02:20:42 +0000 | [diff] [blame] | 16 | * Older implementation of Xen network frontend / backend has an |
| 17 | * implicit dependency on the MAX_SKB_FRAGS as the maximum number of |
| 18 | * ring slots a skb can use. Netfront / netback may not work as |
| 19 | * expected when frontend and backend have different MAX_SKB_FRAGS. |
| 20 | * |
| 21 | * A better approach is to add mechanism for netfront / netback to |
| 22 | * negotiate this value. However we cannot fix all possible |
| 23 | * frontends, so we need to define a value which states the minimum |
| 24 | * slots backend must support. |
| 25 | * |
| 26 | * The minimum value derives from older Linux kernel's MAX_SKB_FRAGS |
| 27 | * (18), which is proved to work with most frontends. Any new backend |
| 28 | * which doesn't negotiate with frontend should expect frontend to |
| 29 | * send a valid packet using slots up to this value. |
| 30 | */ |
| 31 | #define XEN_NETIF_NR_SLOTS_MIN 18 |
| 32 | |
/*
 * Notifications after enqueuing any type of message should be conditional
 * on the appropriate req_event or rsp_event field in the shared ring.
 * If the client sends notifications for rx requests then it should specify
 * feature 'feature-rx-notify' via xenbus.  Otherwise the backend will
 * assume that it cannot safely queue packets (as it may not be kicked to
 * send them).
 */
| 40 | |
/*
 * "feature-split-event-channels" is introduced to separate guest TX
 * and RX notification.  The backend either doesn't support this feature,
 * or advertises it via xenstore as 0 (disabled) or 1 (enabled).
 *
 * To make use of this feature, the frontend should allocate two event
 * channels for TX and RX and advertise them to the backend as
 * "event-channel-tx" and "event-channel-rx" respectively.  If the
 * frontend doesn't want to use this feature, it just writes the
 * "event-channel" node as before.
 */
| 52 | |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 53 | /* |
| 54 | * This is the 'wire' format for packets: |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 55 | * Request 1: xen_netif_tx_request -- XEN_NETTXF_* (any flags) |
| 56 | * [Request 2: xen_netif_extra_info] (only if request 1 has XEN_NETTXF_extra_info) |
| 57 | * [Request 3: xen_netif_extra_info] (only if request 2 has XEN_NETIF_EXTRA_MORE) |
| 58 | * Request 4: xen_netif_tx_request -- XEN_NETTXF_more_data |
| 59 | * Request 5: xen_netif_tx_request -- XEN_NETTXF_more_data |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 60 | * ... |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 61 | * Request N: xen_netif_tx_request -- 0 |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 62 | */ |
| 63 | |
| 64 | /* Protocol checksum field is blank in the packet (hardware offload)? */ |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 65 | #define _XEN_NETTXF_csum_blank (0) |
| 66 | #define XEN_NETTXF_csum_blank (1U<<_XEN_NETTXF_csum_blank) |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 67 | |
| 68 | /* Packet data has been validated against protocol checksum. */ |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 69 | #define _XEN_NETTXF_data_validated (1) |
| 70 | #define XEN_NETTXF_data_validated (1U<<_XEN_NETTXF_data_validated) |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 71 | |
| 72 | /* Packet continues in the next request descriptor. */ |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 73 | #define _XEN_NETTXF_more_data (2) |
| 74 | #define XEN_NETTXF_more_data (1U<<_XEN_NETTXF_more_data) |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 75 | |
| 76 | /* Packet to be followed by extra descriptor(s). */ |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 77 | #define _XEN_NETTXF_extra_info (3) |
| 78 | #define XEN_NETTXF_extra_info (1U<<_XEN_NETTXF_extra_info) |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 79 | |
Wei Liu | 9ecd1a7 | 2013-04-22 02:20:41 +0000 | [diff] [blame] | 80 | #define XEN_NETIF_MAX_TX_SIZE 0xFFFF |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 81 | struct xen_netif_tx_request { |
| 82 | grant_ref_t gref; /* Reference to buffer page */ |
| 83 | uint16_t offset; /* Offset within buffer page */ |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 84 | uint16_t flags; /* XEN_NETTXF_* */ |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 85 | uint16_t id; /* Echoed in response message. */ |
| 86 | uint16_t size; /* Packet size in bytes. */ |
| 87 | }; |
| 88 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 89 | /* Types of xen_netif_extra_info descriptors. */ |
| 90 | #define XEN_NETIF_EXTRA_TYPE_NONE (0) /* Never used - invalid */ |
| 91 | #define XEN_NETIF_EXTRA_TYPE_GSO (1) /* u.gso */ |
| 92 | #define XEN_NETIF_EXTRA_TYPE_MAX (2) |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 93 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 94 | /* xen_netif_extra_info flags. */ |
| 95 | #define _XEN_NETIF_EXTRA_FLAG_MORE (0) |
| 96 | #define XEN_NETIF_EXTRA_FLAG_MORE (1U<<_XEN_NETIF_EXTRA_FLAG_MORE) |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 97 | |
| 98 | /* GSO types - only TCPv4 currently supported. */ |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 99 | #define XEN_NETIF_GSO_TYPE_TCPV4 (1) |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 100 | |
| 101 | /* |
| 102 | * This structure needs to fit within both netif_tx_request and |
| 103 | * netif_rx_response for compatibility. |
| 104 | */ |
| 105 | struct xen_netif_extra_info { |
| 106 | uint8_t type; /* XEN_NETIF_EXTRA_TYPE_* */ |
| 107 | uint8_t flags; /* XEN_NETIF_EXTRA_FLAG_* */ |
| 108 | |
| 109 | union { |
| 110 | struct { |
| 111 | /* |
| 112 | * Maximum payload size of each segment. For |
| 113 | * example, for TCP this is just the path MSS. |
| 114 | */ |
| 115 | uint16_t size; |
| 116 | |
| 117 | /* |
| 118 | * GSO type. This determines the protocol of |
| 119 | * the packet and any extra features required |
| 120 | * to segment the packet properly. |
| 121 | */ |
| 122 | uint8_t type; /* XEN_NETIF_GSO_TYPE_* */ |
| 123 | |
| 124 | /* Future expansion. */ |
| 125 | uint8_t pad; |
| 126 | |
| 127 | /* |
| 128 | * GSO features. This specifies any extra GSO |
| 129 | * features required to process this packet, |
| 130 | * such as ECN support for TCPv4. |
| 131 | */ |
| 132 | uint16_t features; /* XEN_NETIF_GSO_FEAT_* */ |
| 133 | } gso; |
| 134 | |
| 135 | uint16_t pad[3]; |
| 136 | } u; |
| 137 | }; |
| 138 | |
| 139 | struct xen_netif_tx_response { |
| 140 | uint16_t id; |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 141 | int16_t status; /* XEN_NETIF_RSP_* */ |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 142 | }; |
| 143 | |
| 144 | struct xen_netif_rx_request { |
| 145 | uint16_t id; /* Echoed in response message. */ |
| 146 | grant_ref_t gref; /* Reference to incoming granted frame */ |
| 147 | }; |
| 148 | |
| 149 | /* Packet data has been validated against protocol checksum. */ |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 150 | #define _XEN_NETRXF_data_validated (0) |
| 151 | #define XEN_NETRXF_data_validated (1U<<_XEN_NETRXF_data_validated) |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 152 | |
| 153 | /* Protocol checksum field is blank in the packet (hardware offload)? */ |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 154 | #define _XEN_NETRXF_csum_blank (1) |
| 155 | #define XEN_NETRXF_csum_blank (1U<<_XEN_NETRXF_csum_blank) |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 156 | |
| 157 | /* Packet continues in the next request descriptor. */ |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 158 | #define _XEN_NETRXF_more_data (2) |
| 159 | #define XEN_NETRXF_more_data (1U<<_XEN_NETRXF_more_data) |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 160 | |
| 161 | /* Packet to be followed by extra descriptor(s). */ |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 162 | #define _XEN_NETRXF_extra_info (3) |
| 163 | #define XEN_NETRXF_extra_info (1U<<_XEN_NETRXF_extra_info) |
| 164 | |
| 165 | /* GSO Prefix descriptor. */ |
| 166 | #define _XEN_NETRXF_gso_prefix (4) |
| 167 | #define XEN_NETRXF_gso_prefix (1U<<_XEN_NETRXF_gso_prefix) |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 168 | |
| 169 | struct xen_netif_rx_response { |
| 170 | uint16_t id; |
| 171 | uint16_t offset; /* Offset in page of start of received packet */ |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 172 | uint16_t flags; /* XEN_NETRXF_* */ |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 173 | int16_t status; /* -ve: BLKIF_RSP_* ; +ve: Rx'ed pkt size. */ |
| 174 | }; |
| 175 | |
| 176 | /* |
| 177 | * Generate netif ring structures and types. |
| 178 | */ |
| 179 | |
| 180 | DEFINE_RING_TYPES(xen_netif_tx, |
| 181 | struct xen_netif_tx_request, |
| 182 | struct xen_netif_tx_response); |
| 183 | DEFINE_RING_TYPES(xen_netif_rx, |
| 184 | struct xen_netif_rx_request, |
| 185 | struct xen_netif_rx_response); |
| 186 | |
Ian Campbell | f942dc2 | 2011-03-15 00:06:18 +0000 | [diff] [blame] | 187 | #define XEN_NETIF_RSP_DROPPED -2 |
| 188 | #define XEN_NETIF_RSP_ERROR -1 |
| 189 | #define XEN_NETIF_RSP_OKAY 0 |
| 190 | /* No response: used for auxiliary requests (e.g., xen_netif_extra_info). */ |
| 191 | #define XEN_NETIF_RSP_NULL 1 |
Jeremy Fitzhardinge | a42089d | 2007-07-17 18:37:04 -0700 | [diff] [blame] | 192 | |
| 193 | #endif |