author    Dmitry Fleytman <dmitry@daynix.com>    2013-03-09 11:21:04 +0200
committer Stefan Hajnoczi <stefanha@redhat.com>  2013-03-25 11:13:10 +0100
commit    75020a7021513ad4cbad2aa5f6de5d390016f099
tree      6c3e150e88e7e9144ca777dbb5b608874fb8e276
parent    84026301694b98dd08272e613da3497b17023d5c
Common definitions for VMWARE devices
Signed-off-by: Dmitry Fleytman <dmitry@daynix.com>
Signed-off-by: Yan Vugenfirer <yan@daynix.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
-rw-r--r--  hw/vmware_utils.h  | 143
-rw-r--r--  hw/vmxnet_debug.h  | 115
-rw-r--r--  include/net/eth.h  | 347
-rw-r--r--  net/Makefile.objs  |   1
-rw-r--r--  net/eth.c          | 217
5 files changed, 823 insertions(+), 0 deletions(-)
diff --git a/hw/vmware_utils.h b/hw/vmware_utils.h
new file mode 100644
index 0000000000..5307e2ccc9
--- /dev/null
+++ b/hw/vmware_utils.h
@@ -0,0 +1,143 @@
+/*
+ * QEMU VMWARE paravirtual devices - auxiliary code
+ *
+ * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
+ *
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Authors:
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef VMWARE_UTILS_H
+#define VMWARE_UTILS_H
+
+#include "qemu/range.h"
+
+#ifndef VMW_SHPRN
+#define VMW_SHPRN(fmt, ...) do {} while (0)
+#endif
+
+/*
+ * Shared memory access functions with byte swap support
+ * Each function contains a printout for reverse-engineering needs
+ *
+ */
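+/*
+ * Illustrative usage (RxDescriptor, rxd_pa and ring_pa are hypothetical,
+ * not part of this header):
+ *
+ *   RxDescriptor rxd;
+ *   vmw_shmem_read(rxd_pa, &rxd, sizeof(rxd));
+ *   uint32_t gen = vmw_shmem_ld32(ring_pa);
+ *   vmw_shmem_st32(ring_pa, gen ^ 1);
+ */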
+static inline void
+vmw_shmem_read(hwaddr addr, void *buf, int len)
+{
+ VMW_SHPRN("SHMEM r: %" PRIx64 ", len: %d to %p", addr, len, buf);
+ cpu_physical_memory_read(addr, buf, len);
+}
+
+static inline void
+vmw_shmem_write(hwaddr addr, void *buf, int len)
+{
+ VMW_SHPRN("SHMEM w: %" PRIx64 ", len: %d to %p", addr, len, buf);
+ cpu_physical_memory_write(addr, buf, len);
+}
+
+static inline void
+vmw_shmem_rw(hwaddr addr, void *buf, int len, int is_write)
+{
+ VMW_SHPRN("SHMEM r/w: %" PRIx64 ", len: %d (to %p), is write: %d",
+ addr, len, buf, is_write);
+
+ cpu_physical_memory_rw(addr, buf, len, is_write);
+}
+
+static inline void
+vmw_shmem_set(hwaddr addr, uint8_t val, int len)
+{
+ int i;
+ VMW_SHPRN("SHMEM set: %" PRIx64 ", len: %d (value 0x%X)", addr, len, val);
+
+ for (i = 0; i < len; i++) {
+ cpu_physical_memory_write(addr + i, &val, 1);
+ }
+}
+
+static inline uint32_t
+vmw_shmem_ld8(hwaddr addr)
+{
+ uint8_t res = ldub_phys(addr);
+ VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res);
+ return res;
+}
+
+static inline void
+vmw_shmem_st8(hwaddr addr, uint8_t value)
+{
+ VMW_SHPRN("SHMEM store8: %" PRIx64 " (value 0x%X)", addr, value);
+ stb_phys(addr, value);
+}
+
+static inline uint32_t
+vmw_shmem_ld16(hwaddr addr)
+{
+ uint16_t res = lduw_le_phys(addr);
+ VMW_SHPRN("SHMEM load16: %" PRIx64 " (value 0x%X)", addr, res);
+ return res;
+}
+
+static inline void
+vmw_shmem_st16(hwaddr addr, uint16_t value)
+{
+ VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value);
+ stw_le_phys(addr, value);
+}
+
+static inline uint32_t
+vmw_shmem_ld32(hwaddr addr)
+{
+ uint32_t res = ldl_le_phys(addr);
+ VMW_SHPRN("SHMEM load32: %" PRIx64 " (value 0x%X)", addr, res);
+ return res;
+}
+
+static inline void
+vmw_shmem_st32(hwaddr addr, uint32_t value)
+{
+ VMW_SHPRN("SHMEM store32: %" PRIx64 " (value 0x%X)", addr, value);
+ stl_le_phys(addr, value);
+}
+
+static inline uint64_t
+vmw_shmem_ld64(hwaddr addr)
+{
+ uint64_t res = ldq_le_phys(addr);
+ VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res);
+ return res;
+}
+
+static inline void
+vmw_shmem_st64(hwaddr addr, uint64_t value)
+{
+ VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value);
+ stq_le_phys(addr, value);
+}
+
+/* Macros that simplify operations on array-style registers */
+
+/*
+ * Whether <addr> lies inside an array-style register defined by <base>,
+ * the number of elements (<cnt>) and the element size (<regsize>)
+ */
+#define VMW_IS_MULTIREG_ADDR(addr, base, cnt, regsize) \
+ range_covers_byte(base, cnt * regsize, addr)
+
+/*
+ * Returns the index of the given register (<addr>) within an array-style
+ * register defined by <base> and the element size (<regsize>)
+ */
+#define VMW_MULTIREG_IDX_BY_ADDR(addr, base, regsize) \
+ (((addr) - (base)) / (regsize))
+
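+/*
+ * Illustrative usage (IMR_BASE, IMR_COUNT and imr[] are hypothetical):
+ * decode an MMIO write to one element of an array of 32-bit registers
+ *
+ *   if (VMW_IS_MULTIREG_ADDR(addr, IMR_BASE, IMR_COUNT, sizeof(uint32_t))) {
+ *       int idx = VMW_MULTIREG_IDX_BY_ADDR(addr, IMR_BASE, sizeof(uint32_t));
+ *       imr[idx] = val;
+ *   }
+ */
+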
+#endif
diff --git a/hw/vmxnet_debug.h b/hw/vmxnet_debug.h
new file mode 100644
index 0000000000..96dae0f916
--- /dev/null
+++ b/hw/vmxnet_debug.h
@@ -0,0 +1,115 @@
+/*
+ * QEMU VMWARE VMXNET* paravirtual NICs - debugging facilities
+ *
+ * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
+ *
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Authors:
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Tamir Shomer <tamirs@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef _QEMU_VMXNET_DEBUG_H
+#define _QEMU_VMXNET_DEBUG_H
+
+#define VMXNET_DEVICE_NAME "vmxnet3"
+
+/* #define VMXNET_DEBUG_CB */
+#define VMXNET_DEBUG_WARNINGS
+#define VMXNET_DEBUG_ERRORS
+/* #define VMXNET_DEBUG_INTERRUPTS */
+/* #define VMXNET_DEBUG_CONFIG */
+/* #define VMXNET_DEBUG_RINGS */
+/* #define VMXNET_DEBUG_PACKETS */
+/* #define VMXNET_DEBUG_SHMEM_ACCESS */
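+
+/*
+ * A commented-out category above compiles away: its VMW_*PRN macro below
+ * expands to a no-op. Uncomment the corresponding define (or pass it via
+ * CFLAGS, e.g. -DVMXNET_DEBUG_RINGS) to get printouts such as
+ * "[vmxnet3][RI][<function>]: ..." on stdout.
+ */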
+
+#ifdef VMXNET_DEBUG_SHMEM_ACCESS
+#define VMW_SHPRN(fmt, ...) \
+ do { \
+ printf("[%s][SH][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
+ ## __VA_ARGS__); \
+ } while (0)
+#else
+#define VMW_SHPRN(fmt, ...) do {} while (0)
+#endif
+
+#ifdef VMXNET_DEBUG_CB
+#define VMW_CBPRN(fmt, ...) \
+ do { \
+ printf("[%s][CB][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
+ ## __VA_ARGS__); \
+ } while (0)
+#else
+#define VMW_CBPRN(fmt, ...) do {} while (0)
+#endif
+
+#ifdef VMXNET_DEBUG_PACKETS
+#define VMW_PKPRN(fmt, ...) \
+ do { \
+ printf("[%s][PK][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
+ ## __VA_ARGS__); \
+ } while (0)
+#else
+#define VMW_PKPRN(fmt, ...) do {} while (0)
+#endif
+
+#ifdef VMXNET_DEBUG_WARNINGS
+#define VMW_WRPRN(fmt, ...) \
+ do { \
+ printf("[%s][WR][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
+ ## __VA_ARGS__); \
+ } while (0)
+#else
+#define VMW_WRPRN(fmt, ...) do {} while (0)
+#endif
+
+#ifdef VMXNET_DEBUG_ERRORS
+#define VMW_ERPRN(fmt, ...) \
+ do { \
+ printf("[%s][ER][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
+ ## __VA_ARGS__); \
+ } while (0)
+#else
+#define VMW_ERPRN(fmt, ...) do {} while (0)
+#endif
+
+#ifdef VMXNET_DEBUG_INTERRUPTS
+#define VMW_IRPRN(fmt, ...) \
+ do { \
+ printf("[%s][IR][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
+ ## __VA_ARGS__); \
+ } while (0)
+#else
+#define VMW_IRPRN(fmt, ...) do {} while (0)
+#endif
+
+#ifdef VMXNET_DEBUG_CONFIG
+#define VMW_CFPRN(fmt, ...) \
+ do { \
+ printf("[%s][CF][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
+ ## __VA_ARGS__); \
+ } while (0)
+#else
+#define VMW_CFPRN(fmt, ...) do {} while (0)
+#endif
+
+#ifdef VMXNET_DEBUG_RINGS
+#define VMW_RIPRN(fmt, ...) \
+ do { \
+ printf("[%s][RI][%s]: " fmt "\n", VMXNET_DEVICE_NAME, __func__, \
+ ## __VA_ARGS__); \
+ } while (0)
+#else
+#define VMW_RIPRN(fmt, ...) do {} while (0)
+#endif
+
+#define VMXNET_MF "%02X:%02X:%02X:%02X:%02X:%02X"
+#define VMXNET_MA(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
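+
+/*
+ * Illustrative usage ("mac" is a hypothetical uint8_t[6]):
+ *
+ *   VMW_CFPRN("Device MAC: " VMXNET_MF, VMXNET_MA(mac));
+ */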
+
+#endif /* _QEMU_VMXNET_DEBUG_H */
diff --git a/include/net/eth.h b/include/net/eth.h
new file mode 100644
index 0000000000..1d48e06b22
--- /dev/null
+++ b/include/net/eth.h
@@ -0,0 +1,347 @@
+/*
+ * QEMU network structures definitions and helper functions
+ *
+ * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
+ *
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Portions developed by Free Software Foundation, Inc
+ * Copyright (C) 1991-1997, 2001, 2003, 2006 Free Software Foundation, Inc.
+ * See netinet/ip6.h and netinet/in.h (GNU C Library)
+ *
+ * Portions developed by Igor Kovalenko
+ * Copyright (c) 2006 Igor Kovalenko
+ * See hw/rtl8139.c (QEMU)
+ *
+ * Authors:
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Tamir Shomer <tamirs@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_ETH_H
+#define QEMU_ETH_H
+
+#include <sys/types.h>
+#include <string.h>
+#include "qemu/bswap.h"
+#include "qemu/iov.h"
+
+#define ETH_ALEN 6
+
+struct eth_header {
+ uint8_t h_dest[ETH_ALEN]; /* destination eth addr */
+ uint8_t h_source[ETH_ALEN]; /* source ether addr */
+ uint16_t h_proto; /* packet type ID field */
+};
+
+struct vlan_header {
+ uint16_t h_tci; /* priority and VLAN ID */
+ uint16_t h_proto; /* encapsulated protocol */
+};
+
+struct ip_header {
+ uint8_t ip_ver_len; /* version and header length */
+ uint8_t ip_tos; /* type of service */
+ uint16_t ip_len; /* total length */
+ uint16_t ip_id; /* identification */
+ uint16_t ip_off; /* fragment offset field */
+ uint8_t ip_ttl; /* time to live */
+ uint8_t ip_p; /* protocol */
+ uint16_t ip_sum; /* checksum */
+ uint32_t ip_src, ip_dst; /* source and destination address */
+};
+
+typedef struct tcp_header {
+ uint16_t th_sport; /* source port */
+ uint16_t th_dport; /* destination port */
+ uint32_t th_seq; /* sequence number */
+ uint32_t th_ack; /* acknowledgment number */
+ uint16_t th_offset_flags; /* data offset, reserved 6 bits, */
+ /* TCP protocol flags */
+ uint16_t th_win; /* window */
+ uint16_t th_sum; /* checksum */
+ uint16_t th_urp; /* urgent pointer */
+} tcp_header;
+
+typedef struct udp_header {
+ uint16_t uh_sport; /* source port */
+ uint16_t uh_dport; /* destination port */
+ uint16_t uh_ulen; /* udp length */
+ uint16_t uh_sum; /* udp checksum */
+} udp_header;
+
+typedef struct ip_pseudo_header {
+ uint32_t ip_src;
+ uint32_t ip_dst;
+ uint8_t zeros;
+ uint8_t ip_proto;
+ uint16_t ip_payload;
+} ip_pseudo_header;
+
+/* IPv6 address */
+struct in6_addr {
+ union {
+ uint8_t __u6_addr8[16];
+ } __in6_u;
+};
+
+struct ip6_header {
+ union {
+ struct ip6_hdrctl {
+ uint32_t ip6_un1_flow; /* 4 bits version, 8 bits TC,
+ 20 bits flow-ID */
+ uint16_t ip6_un1_plen; /* payload length */
+ uint8_t ip6_un1_nxt; /* next header */
+ uint8_t ip6_un1_hlim; /* hop limit */
+ } ip6_un1;
+ uint8_t ip6_un2_vfc; /* 4 bits version, top 4 bits tclass */
+ struct ip6_ecn_access {
+ uint8_t ip6_un3_vfc; /* 4 bits version, top 4 bits tclass */
+ uint8_t ip6_un3_ecn; /* 2 bits ECN, top 6 bits payload length */
+ } ip6_un3;
+ } ip6_ctlun;
+ struct in6_addr ip6_src; /* source address */
+ struct in6_addr ip6_dst; /* destination address */
+};
+
+struct ip6_ext_hdr {
+ uint8_t ip6r_nxt; /* next header */
+ uint8_t ip6r_len; /* length in units of 8 octets */
+};
+
+struct udp_hdr {
+ uint16_t uh_sport; /* source port */
+ uint16_t uh_dport; /* destination port */
+ uint16_t uh_ulen; /* udp length */
+ uint16_t uh_sum; /* udp checksum */
+};
+
+struct tcp_hdr {
+ u_short th_sport; /* source port */
+ u_short th_dport; /* destination port */
+ uint32_t th_seq; /* sequence number */
+ uint32_t th_ack; /* acknowledgment number */
+#ifdef HOST_WORDS_BIGENDIAN
+ u_char th_off : 4, /* data offset */
+ th_x2:4; /* (unused) */
+#else
+ u_char th_x2 : 4, /* (unused) */
+ th_off:4; /* data offset */
+#endif
+
+#define TH_ELN 0x1 /* explicit loss notification */
+#define TH_ECN 0x2 /* explicit congestion notification */
+#define TH_FS 0x4 /* fast start */
+
+ u_char th_flags;
+#define TH_FIN 0x01
+#define TH_SYN 0x02
+#define TH_RST 0x04
+#define TH_PUSH 0x08
+#define TH_ACK 0x10
+#define TH_URG 0x20
+ u_short th_win; /* window */
+ u_short th_sum; /* checksum */
+ u_short th_urp; /* urgent pointer */
+};
+
+#define ip6_nxt ip6_ctlun.ip6_un1.ip6_un1_nxt
+#define ip6_ecn_acc ip6_ctlun.ip6_un3.ip6_un3_ecn
+
+#define PKT_GET_ETH_HDR(p) \
+ ((struct eth_header *)(p))
+#define PKT_GET_VLAN_HDR(p) \
+ ((struct vlan_header *) (((uint8_t *)(p)) + sizeof(struct eth_header)))
+#define PKT_GET_DVLAN_HDR(p) \
+ (PKT_GET_VLAN_HDR(p) + 1)
+#define PKT_GET_IP_HDR(p) \
+ ((struct ip_header *)(((uint8_t *)(p)) + eth_get_l2_hdr_length(p)))
+#define IP_HDR_GET_LEN(p) \
+ ((((struct ip_header *)p)->ip_ver_len & 0x0F) << 2)
+#define PKT_GET_IP_HDR_LEN(p) \
+ (IP_HDR_GET_LEN(PKT_GET_IP_HDR(p)))
+#define PKT_GET_IP6_HDR(p) \
+ ((struct ip6_header *) (((uint8_t *)(p)) + eth_get_l2_hdr_length(p)))
+#define IP_HEADER_VERSION(ip) \
+ (((ip)->ip_ver_len >> 4) & 0xf)
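+
+/*
+ * Illustrative usage ("pkt" is a hypothetical pointer to a raw Ethernet
+ * frame): locate the IPv4 header and obtain its length in bytes
+ *
+ *   struct ip_header *ip = PKT_GET_IP_HDR(pkt);
+ *   size_t ip_hlen = IP_HDR_GET_LEN(ip);
+ *
+ * PKT_GET_IP_HDR_LEN(pkt) combines both steps.
+ */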
+
+#define ETH_P_IP (0x0800)
+#define ETH_P_IPV6 (0x86dd)
+#define ETH_P_VLAN (0x8100)
+#define ETH_P_DVLAN (0x88a8)
+#define VLAN_VID_MASK 0x0fff
+#define IP_HEADER_VERSION_4 (4)
+#define IP_HEADER_VERSION_6 (6)
+#define IP_PROTO_TCP (6)
+#define IP_PROTO_UDP (17)
+#define IPTOS_ECN_MASK 0x03
+#define IPTOS_ECN(x) ((x) & IPTOS_ECN_MASK)
+#define IPTOS_ECN_CE 0x03
+#define IP6_ECN_MASK 0xC0
+#define IP6_ECN(x) ((x) & IP6_ECN_MASK)
+#define IP6_ECN_CE 0xC0
+#define IP4_DONT_FRAGMENT_FLAG (1 << 14)
+
+#define IS_SPECIAL_VLAN_ID(x) \
+ (((x) == 0) || ((x) == 0xFFF))
+
+#define ETH_MAX_L2_HDR_LEN \
+ (sizeof(struct eth_header) + 2 * sizeof(struct vlan_header))
+
+#define ETH_MAX_IP4_HDR_LEN (60)
+#define ETH_MAX_IP_DGRAM_LEN (0xFFFF)
+
+#define IP_FRAG_UNIT_SIZE (8)
+#define IP_FRAG_ALIGN_SIZE(x) ((x) & ~0x7)
+#define IP_RF 0x8000 /* reserved fragment flag */
+#define IP_DF 0x4000 /* don't fragment flag */
+#define IP_MF 0x2000 /* more fragments flag */
+#define IP_OFFMASK 0x1fff /* mask for fragmenting bits */
+
+#define IP6_EXT_GRANULARITY (8) /* Size granularity for
+ IPv6 extension headers */
+
+/* IP6 extension header types */
+#define IP6_HOP_BY_HOP (0)
+#define IP6_ROUTING (43)
+#define IP6_FRAGMENT (44)
+#define IP6_ESP (50)
+#define IP6_AUTHENTICATION (51)
+#define IP6_NONE (59)
+#define IP6_DESTINATION (60)
+#define IP6_MOBILITY (135)
+
+static inline int is_multicast_ether_addr(const uint8_t *addr)
+{
+ return 0x01 & addr[0];
+}
+
+static inline int is_broadcast_ether_addr(const uint8_t *addr)
+{
+ return (addr[0] & addr[1] & addr[2] & addr[3] & addr[4] & addr[5]) == 0xff;
+}
+
+static inline int is_unicast_ether_addr(const uint8_t *addr)
+{
+ return !is_multicast_ether_addr(addr);
+}
+
+typedef enum {
+ ETH_PKT_UCAST = 0xAABBCC00,
+ ETH_PKT_BCAST,
+ ETH_PKT_MCAST
+} eth_pkt_types_e;
+
+static inline eth_pkt_types_e
+get_eth_packet_type(const struct eth_header *ehdr)
+{
+ if (is_broadcast_ether_addr(ehdr->h_dest)) {
+ return ETH_PKT_BCAST;
+ } else if (is_multicast_ether_addr(ehdr->h_dest)) {
+ return ETH_PKT_MCAST;
+ } else { /* unicast */
+ return ETH_PKT_UCAST;
+ }
+}
+
+static inline uint32_t
+eth_get_l2_hdr_length(const void *p)
+{
+ uint16_t proto = be16_to_cpu(PKT_GET_ETH_HDR(p)->h_proto);
+ struct vlan_header *hvlan = PKT_GET_VLAN_HDR(p);
+ switch (proto) {
+ case ETH_P_VLAN:
+ return sizeof(struct eth_header) + sizeof(struct vlan_header);
+ case ETH_P_DVLAN:
+ if (be16_to_cpu(hvlan->h_proto) == ETH_P_VLAN) {
+ return sizeof(struct eth_header) + 2 * sizeof(struct vlan_header);
+ } else {
+ return sizeof(struct eth_header) + sizeof(struct vlan_header);
+ }
+ default:
+ return sizeof(struct eth_header);
+ }
+}
+
+static inline uint16_t
+eth_get_pkt_tci(const void *p)
+{
+ uint16_t proto = be16_to_cpu(PKT_GET_ETH_HDR(p)->h_proto);
+ struct vlan_header *hvlan = PKT_GET_VLAN_HDR(p);
+ switch (proto) {
+ case ETH_P_VLAN:
+ case ETH_P_DVLAN:
+ return be16_to_cpu(hvlan->h_tci);
+ default:
+ return 0;
+ }
+}
+
+static inline bool
+eth_strip_vlan(const void *p, uint8_t *new_ehdr_buf,
+ uint16_t *payload_offset, uint16_t *tci)
+{
+ uint16_t proto = be16_to_cpu(PKT_GET_ETH_HDR(p)->h_proto);
+ struct vlan_header *hvlan = PKT_GET_VLAN_HDR(p);
+ struct eth_header *new_ehdr = (struct eth_header *) new_ehdr_buf;
+
+ switch (proto) {
+ case ETH_P_VLAN:
+ case ETH_P_DVLAN:
+ memcpy(new_ehdr->h_source, PKT_GET_ETH_HDR(p)->h_source, ETH_ALEN);
+ memcpy(new_ehdr->h_dest, PKT_GET_ETH_HDR(p)->h_dest, ETH_ALEN);
+ new_ehdr->h_proto = hvlan->h_proto;
+ *tci = be16_to_cpu(hvlan->h_tci);
+ *payload_offset =
+ sizeof(struct eth_header) + sizeof(struct vlan_header);
+ if (be16_to_cpu(new_ehdr->h_proto) == ETH_P_VLAN) {
+ memcpy(PKT_GET_VLAN_HDR(new_ehdr),
+ PKT_GET_DVLAN_HDR(p),
+ sizeof(struct vlan_header));
+ *payload_offset += sizeof(struct vlan_header);
+ }
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline uint16_t
+eth_get_l3_proto(const void *l2hdr, size_t l2hdr_len)
+{
+ uint8_t *proto_ptr = (uint8_t *) l2hdr + l2hdr_len - sizeof(uint16_t);
+ return be16_to_cpup((uint16_t *)proto_ptr);
+}
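+
+/*
+ * For example, with "pkt" pointing at the start of a raw IPv4 frame
+ * (VLAN-tagged or not), the following evaluates to ETH_P_IP (0x0800):
+ *
+ *   eth_get_l3_proto(pkt, eth_get_l2_hdr_length(pkt))
+ */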
+
+void eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag,
+ bool *is_new);
+
+uint8_t eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto);
+
+void eth_get_protocols(const uint8_t *headers,
+ uint32_t hdr_length,
+ bool *isip4, bool *isip6,
+ bool *isudp, bool *istcp);
+
+void eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
+ void *l3hdr, size_t l3hdr_len,
+ size_t l3payload_len,
+ size_t frag_offset, bool more_frags);
+
+void
+eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len);
+
+uint32_t
+eth_calc_pseudo_hdr_csum(struct ip_header *iphdr, uint16_t csl);
+
+bool
+eth_parse_ipv6_hdr(struct iovec *pkt, int pkt_frags,
+ size_t ip6hdr_off, uint8_t *l4proto,
+ size_t *full_hdr_len);
+
+#endif
diff --git a/net/Makefile.objs b/net/Makefile.objs
index a08cd14e2e..4854a14fe4 100644
--- a/net/Makefile.objs
+++ b/net/Makefile.objs
@@ -1,6 +1,7 @@
common-obj-y = net.o queue.o checksum.o util.o hub.o
common-obj-y += socket.o
common-obj-y += dump.o
+common-obj-y += eth.o
common-obj-$(CONFIG_POSIX) += tap.o
common-obj-$(CONFIG_LINUX) += tap-linux.o
common-obj-$(CONFIG_WIN32) += tap-win32.o
diff --git a/net/eth.c b/net/eth.c
new file mode 100644
index 0000000000..1d7494d5e5
--- /dev/null
+++ b/net/eth.c
@@ -0,0 +1,217 @@
+/*
+ * QEMU network structures definitions and helper functions
+ *
+ * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
+ *
+ * Developed by Daynix Computing LTD (http://www.daynix.com)
+ *
+ * Authors:
+ * Dmitry Fleytman <dmitry@daynix.com>
+ * Tamir Shomer <tamirs@daynix.com>
+ * Yan Vugenfirer <yan@daynix.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "net/eth.h"
+#include "net/checksum.h"
+#include "qemu-common.h"
+#include "net/tap.h"
+
+void eth_setup_vlan_headers(struct eth_header *ehdr, uint16_t vlan_tag,
+ bool *is_new)
+{
+ struct vlan_header *vhdr = PKT_GET_VLAN_HDR(ehdr);
+
+ switch (be16_to_cpu(ehdr->h_proto)) {
+ case ETH_P_VLAN:
+ case ETH_P_DVLAN:
+ /* vlan hdr exists */
+ *is_new = false;
+ break;
+
+ default:
+ /* No VLAN header, put a new one */
+ vhdr->h_proto = ehdr->h_proto;
+ ehdr->h_proto = cpu_to_be16(ETH_P_VLAN);
+ *is_new = true;
+ break;
+ }
+ vhdr->h_tci = cpu_to_be16(vlan_tag);
+}
+
+uint8_t
+eth_get_gso_type(uint16_t l3_proto, uint8_t *l3_hdr, uint8_t l4proto)
+{
+ uint8_t ecn_state = 0;
+
+ if (l3_proto == ETH_P_IP) {
+ struct ip_header *iphdr = (struct ip_header *) l3_hdr;
+
+ if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
+ if (IPTOS_ECN(iphdr->ip_tos) == IPTOS_ECN_CE) {
+ ecn_state = VIRTIO_NET_HDR_GSO_ECN;
+ }
+ if (l4proto == IP_PROTO_TCP) {
+ return VIRTIO_NET_HDR_GSO_TCPV4 | ecn_state;
+ } else if (l4proto == IP_PROTO_UDP) {
+ return VIRTIO_NET_HDR_GSO_UDP | ecn_state;
+ }
+ }
+ } else if (l3_proto == ETH_P_IPV6) {
+ struct ip6_header *ip6hdr = (struct ip6_header *) l3_hdr;
+
+ if (IP6_ECN(ip6hdr->ip6_ecn_acc) == IP6_ECN_CE) {
+ ecn_state = VIRTIO_NET_HDR_GSO_ECN;
+ }
+
+ if (l4proto == IP_PROTO_TCP) {
+ return VIRTIO_NET_HDR_GSO_TCPV6 | ecn_state;
+ }
+ }
+
+ /* Unsupported offload */
+ assert(false);
+
+ return VIRTIO_NET_HDR_GSO_NONE | ecn_state;
+}
+
+void eth_get_protocols(const uint8_t *headers,
+ uint32_t hdr_length,
+ bool *isip4, bool *isip6,
+ bool *isudp, bool *istcp)
+{
+ int proto;
+ size_t l2hdr_len = eth_get_l2_hdr_length(headers);
+ assert(hdr_length >= eth_get_l2_hdr_length(headers));
+ *isip4 = *isip6 = *isudp = *istcp = false;
+
+ proto = eth_get_l3_proto(headers, l2hdr_len);
+ if (proto == ETH_P_IP) {
+ *isip4 = true;
+
+ struct ip_header *iphdr;
+
+ assert(hdr_length >=
+ eth_get_l2_hdr_length(headers) + sizeof(struct ip_header));
+
+ iphdr = PKT_GET_IP_HDR(headers);
+
+ if (IP_HEADER_VERSION(iphdr) == IP_HEADER_VERSION_4) {
+ if (iphdr->ip_p == IP_PROTO_TCP) {
+ *istcp = true;
+ } else if (iphdr->ip_p == IP_PROTO_UDP) {
+ *isudp = true;
+ }
+ }
+ } else if (proto == ETH_P_IPV6) {
+ uint8_t l4proto;
+ size_t full_ip6hdr_len;
+
+ struct iovec hdr_vec;
+ hdr_vec.iov_base = (void *) headers;
+ hdr_vec.iov_len = hdr_length;
+
+ *isip6 = true;
+ if (eth_parse_ipv6_hdr(&hdr_vec, 1, l2hdr_len,
+ &l4proto, &full_ip6hdr_len)) {
+ if (l4proto == IP_PROTO_TCP) {
+ *istcp = true;
+ } else if (l4proto == IP_PROTO_UDP) {
+ *isudp = true;
+ }
+ }
+ }
+}
+
+void
+eth_setup_ip4_fragmentation(const void *l2hdr, size_t l2hdr_len,
+ void *l3hdr, size_t l3hdr_len,
+ size_t l3payload_len,
+ size_t frag_offset, bool more_frags)
+{
+ if (eth_get_l3_proto(l2hdr, l2hdr_len) == ETH_P_IP) {
+ uint16_t orig_flags;
+ struct ip_header *iphdr = (struct ip_header *) l3hdr;
+ uint16_t frag_off_units = frag_offset / IP_FRAG_UNIT_SIZE;
+ uint16_t new_ip_off;
+
+ assert(frag_offset % IP_FRAG_UNIT_SIZE == 0);
+ assert((frag_off_units & ~IP_OFFMASK) == 0);
+
+ orig_flags = be16_to_cpu(iphdr->ip_off) & ~(IP_OFFMASK|IP_MF);
+ new_ip_off = frag_off_units | orig_flags | (more_frags ? IP_MF : 0);
+ iphdr->ip_off = cpu_to_be16(new_ip_off);
+ iphdr->ip_len = cpu_to_be16(l3payload_len + l3hdr_len);
+ }
+}
+
+void
+eth_fix_ip4_checksum(void *l3hdr, size_t l3hdr_len)
+{
+ struct ip_header *iphdr = (struct ip_header *) l3hdr;
+ iphdr->ip_sum = 0;
+ iphdr->ip_sum = cpu_to_be16(net_raw_checksum(l3hdr, l3hdr_len));
+}
+
+uint32_t
+eth_calc_pseudo_hdr_csum(struct ip_header *iphdr, uint16_t csl)
+{
+ struct ip_pseudo_header ipph;
+ ipph.ip_src = iphdr->ip_src;
+ ipph.ip_dst = iphdr->ip_dst;
+ ipph.ip_payload = cpu_to_be16(csl);
+ ipph.ip_proto = iphdr->ip_p;
+ ipph.zeros = 0;
+ return net_checksum_add(sizeof(ipph), (uint8_t *) &ipph);
+}
+
+static bool
+eth_is_ip6_extension_header_type(uint8_t hdr_type)
+{
+ switch (hdr_type) {
+ case IP6_HOP_BY_HOP:
+ case IP6_ROUTING:
+ case IP6_FRAGMENT:
+ case IP6_ESP:
+ case IP6_AUTHENTICATION:
+ case IP6_DESTINATION:
+ case IP6_MOBILITY:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool eth_parse_ipv6_hdr(struct iovec *pkt, int pkt_frags,
+ size_t ip6hdr_off, uint8_t *l4proto,
+ size_t *full_hdr_len)
+{
+ struct ip6_header ip6_hdr;
+ struct ip6_ext_hdr ext_hdr;
+ size_t bytes_read;
+
+ bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off,
+ &ip6_hdr, sizeof(ip6_hdr));
+ if (bytes_read < sizeof(ip6_hdr)) {
+ return false;
+ }
+
+ *full_hdr_len = sizeof(struct ip6_header);
+
+ if (!eth_is_ip6_extension_header_type(ip6_hdr.ip6_nxt)) {
+ *l4proto = ip6_hdr.ip6_nxt;
+ return true;
+ }
+
+ do {
+ bytes_read = iov_to_buf(pkt, pkt_frags, ip6hdr_off + *full_hdr_len,
+ &ext_hdr, sizeof(ext_hdr));
+ *full_hdr_len += (ext_hdr.ip6r_len + 1) * IP6_EXT_GRANULARITY;
+ } while (eth_is_ip6_extension_header_type(ext_hdr.ip6r_nxt));
+
+ *l4proto = ext_hdr.ip6r_nxt;
+ return true;
+}